From a343da2f6f1b49e552fc4e79a7fcbda9d10a54a3 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 8 Jun 2022 09:56:24 -0400
Subject: [PATCH 01/48] chore(deps): bump github.com/klauspost/compress from 1.15.5 to 1.15.6 (#2582)

Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.15.5 to 1.15.6.
- [Release notes](https://github.com/klauspost/compress/releases)
- [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml)
- [Commits](https://github.com/klauspost/compress/compare/v1.15.5...v1.15.6)

---
updated-dependencies:
- dependency-name: github.com/klauspost/compress
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 1d7e6ba5b4..dcb3274a85 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,7 @@ require (
 	github.com/ipfs/go-ds-badger2 v0.1.1
 	github.com/ipfs/go-ipns v0.1.2 //indirect
 	github.com/jpillora/ipfilter v1.2.5
-	github.com/klauspost/compress v1.15.5
+	github.com/klauspost/compress v1.15.6
 	github.com/libp2p/go-libp2p v0.15.1
 	github.com/libp2p/go-libp2p-core v0.9.0
 	github.com/libp2p/go-libp2p-discovery v0.5.1
diff --git a/go.sum b/go.sum
index 637f9e7590..a273fc554e 100644
--- a/go.sum
+++ b/go.sum
@@ -635,8 +635,8 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.15.5 h1:qyCLMz2JCrKADihKOh9FxnW3houKeNsp2h5OEz0QSEA=
-github.com/klauspost/compress v1.15.5/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=

From e5c8cf5bad4bb4ed280c0ecb195ffe4bcbf26bf9 Mon Sep 17 00:00:00 2001
From: Eclésio Junior
Date: Thu, 9 Jun 2022 15:31:11 -0400
Subject: [PATCH 02/48] fix(state/epoch): assign epoch 1 when block number is 0 (#2592)

* fix: assign `nextEpoch` to 1 when block number is 0
---
 dot/state/epoch.go | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/dot/state/epoch.go b/dot/state/epoch.go
index 2862e622df..c53ed7f799 100644
--- a/dot/state/epoch.go
+++ b/dot/state/epoch.go
@@ -546,13 +546,16 @@ func (s *EpochState) FinalizeBABENextEpochData(finalizedHeader *types.Header) er
 	s.nextEpochDataLock.Lock()
 	defer s.nextEpochDataLock.Unlock()
 
-	finalizedBlockEpoch, err := s.GetEpochForBlock(finalizedHeader)
-	if err != nil {
-		return fmt.Errorf("cannot get epoch for block %d (%s): %w",
-			finalizedHeader.Number, finalizedHeader.Hash(), err)
-	}
+	var nextEpoch uint64 = 1
+	if finalizedHeader.Number != 0 {
+		finalizedBlockEpoch, err := s.GetEpochForBlock(finalizedHeader)
+ if err != nil { + return fmt.Errorf("cannot get epoch for block %d (%s): %w", + finalizedHeader.Number, finalizedHeader.Hash(), err) + } - nextEpoch := finalizedBlockEpoch + 1 + nextEpoch = finalizedBlockEpoch + 1 + } epochInDatabase, err := s.getEpochDataFromDatabase(nextEpoch) @@ -600,13 +603,16 @@ func (s *EpochState) FinalizeBABENextConfigData(finalizedHeader *types.Header) e s.nextConfigDataLock.Lock() defer s.nextConfigDataLock.Unlock() - finalizedBlockEpoch, err := s.GetEpochForBlock(finalizedHeader) - if err != nil { - return fmt.Errorf("cannot get epoch for block %d (%s): %w", - finalizedHeader.Number, finalizedHeader.Hash(), err) - } + var nextEpoch uint64 = 1 + if finalizedHeader.Number != 0 { + finalizedBlockEpoch, err := s.GetEpochForBlock(finalizedHeader) + if err != nil { + return fmt.Errorf("cannot get epoch for block %d (%s): %w", + finalizedHeader.Number, finalizedHeader.Hash(), err) + } - nextEpoch := finalizedBlockEpoch + 1 + nextEpoch = finalizedBlockEpoch + 1 + } configInDatabase, err := s.getConfigDataFromDatabase(nextEpoch) From 8582cb20a37d574e6397f980f919c6dd4dfb24a0 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Thu, 9 Jun 2022 18:30:28 -0400 Subject: [PATCH 03/48] chore(tests): cache runtimes to work offline (#2577) - Download runtimes to ${TMP}/gossamer/runtimes/ depending on environment - Do not remove runtime files and re-use them if found - Merge `GetRuntimeVars` and `GetRuntimeBlob` into `GetRuntime` - Improve HTTP fetching code for runtime wasms - Remove `TestMain`-used `GetRuntimes` since we already have calls to `GetRuntime` wherever a runtime blob is needed. --- cmd/gossamer/config_test.go | 5 +- dot/core/service_integration_test.go | 23 +--- dot/node_integration_test.go | 5 +- dot/rpc/modules/author_integration_test.go | 22 +--- dot/rpc/subscription/listeners_test.go | 8 +- dot/sync/syncer_integeration_test.go | 15 --- lib/babe/babe_integration_test.go | 17 --- lib/runtime/constants.go | 2 - lib/runtime/life/test_helpers.go | 22 ++-- lib/runtime/test_helpers.go | 144 ++++++++++++--------- lib/runtime/wasmer/imports_test.go | 16 --- lib/runtime/wasmer/instance_test.go | 9 +- lib/runtime/wasmer/test_helpers.go | 24 ++-- 13 files changed, 125 insertions(+), 187 deletions(-) diff --git a/cmd/gossamer/config_test.go b/cmd/gossamer/config_test.go index 76f1c3ab58..0a744d5369 100644 --- a/cmd/gossamer/config_test.go +++ b/cmd/gossamer/config_test.go @@ -4,6 +4,7 @@ package main import ( + "context" "encoding/hex" "encoding/json" "errors" @@ -941,9 +942,7 @@ func TestGlobalNodeName_WhenNodeAlreadyHasStoredName(t *testing.T) { cfg := newTestConfig(t) cfg.Global.Name = globalName - runtimeFilePath := filepath.Join(t.TempDir(), "runtime") - _, testRuntimeURL := runtime.GetRuntimeVars(runtime.NODE_RUNTIME) - err := runtime.GetRuntimeBlob(runtimeFilePath, testRuntimeURL) + runtimeFilePath, err := runtime.GetRuntime(context.Background(), runtime.NODE_RUNTIME) require.NoError(t, err) runtimeData, err := os.ReadFile(runtimeFilePath) require.NoError(t, err) diff --git a/dot/core/service_integration_test.go b/dot/core/service_integration_test.go index a37903c065..f60dc989cf 100644 --- a/dot/core/service_integration_test.go +++ b/dot/core/service_integration_test.go @@ -6,6 +6,7 @@ package core import ( + "context" "fmt" "math/big" "os" @@ -134,20 +135,6 @@ func generateTestValidRemarkTxns(t *testing.T, pubKey []byte, accInfo types.Acco return extBytes, rt } -func TestMain(m *testing.M) { - wasmFilePaths, err := runtime.GenerateRuntimeWasmFile() - if err != nil { - 
log.Errorf("failed to generate runtime wasm file: %s", err) - os.Exit(1) - } - - // Start all tests - code := m.Run() - - runtime.RemoveFiles(wasmFilePaths) - os.Exit(code) -} - func TestStartService(t *testing.T) { s := NewTestService(t, nil) @@ -695,7 +682,9 @@ func TestService_HandleRuntimeChanges(t *testing.T) { func TestService_HandleCodeSubstitutes(t *testing.T) { s := NewTestService(t, nil) - testRuntime, err := os.ReadFile(runtime.POLKADOT_RUNTIME_FP) + runtimeFilepath, err := runtime.GetRuntime(context.Background(), runtime.POLKADOT_RUNTIME) + require.NoError(t, err) + testRuntime, err := os.ReadFile(runtimeFilepath) require.NoError(t, err) // hash for known test code substitution @@ -745,7 +734,9 @@ func TestService_HandleRuntimeChangesAfterCodeSubstitutes(t *testing.T) { require.NoError(t, err) require.Equal(t, codeHashBefore, parentRt.GetCodeHash()) // codeHash should remain unchanged after code substitute - testRuntime, err := os.ReadFile(runtime.POLKADOT_RUNTIME_FP) + runtimeFilepath, err := runtime.GetRuntime(context.Background(), runtime.POLKADOT_RUNTIME) + require.NoError(t, err) + testRuntime, err := os.ReadFile(runtimeFilepath) require.NoError(t, err) ts, err = s.storageState.TrieState(nil) diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index cb16036a60..dfa18c2b69 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go @@ -6,6 +6,7 @@ package dot import ( + "context" "encoding/hex" "encoding/json" "os" @@ -271,9 +272,7 @@ func TestNode_PersistGlobalName_WhenInitialize(t *testing.T) { // newTestGenesisAndRuntime create a new test runtime and a new test genesis // file with the test runtime stored in raw data and returns the genesis file func newTestGenesisAndRuntime(t *testing.T) (filename string) { - runtimeFilePath := filepath.Join(t.TempDir(), "runtime") - _, testRuntimeURL := runtime.GetRuntimeVars(runtime.NODE_RUNTIME) - err := runtime.GetRuntimeBlob(runtimeFilePath, testRuntimeURL) + runtimeFilePath, err := runtime.GetRuntime(context.Background(), runtime.NODE_RUNTIME) require.NoError(t, err) runtimeData, err := os.ReadFile(runtimeFilePath) require.NoError(t, err) diff --git a/dot/rpc/modules/author_integration_test.go b/dot/rpc/modules/author_integration_test.go index 9930ece237..385243f763 100644 --- a/dot/rpc/modules/author_integration_test.go +++ b/dot/rpc/modules/author_integration_test.go @@ -6,6 +6,7 @@ package modules import ( + "context" "errors" "fmt" "os" @@ -53,16 +54,11 @@ func useInstanceFromGenesis(t *testing.T, rtStorage *storage.TrieState) (instanc } func useInstanceFromRuntimeV0910(t *testing.T, rtStorage *storage.TrieState) (instance runtime.Instance) { - testRuntimeFilePath, testRuntimeURL := runtime.GetRuntimeVars(runtime.POLKADOT_RUNTIME_v0910) - err := runtime.GetRuntimeBlob(testRuntimeFilePath, testRuntimeURL) + testRuntimeFilePath, err := runtime.GetRuntime(context.Background(), runtime.POLKADOT_RUNTIME_v0910) require.NoError(t, err) - bytes, err := os.ReadFile(testRuntimeFilePath) require.NoError(t, err) - err = runtime.RemoveFiles([]string{testRuntimeFilePath}) - require.NoError(t, err) - rtStorage.Set(common.CodeKey, bytes) cfg := &wasmer.Config{} @@ -82,20 +78,6 @@ func useInstanceFromRuntimeV0910(t *testing.T, rtStorage *storage.TrieState) (in return runtimeInstance } -func TestMain(m *testing.M) { - wasmFilePaths, err := runtime.GenerateRuntimeWasmFile() - if err != nil { - log.Errorf("failed to generate runtime wasm file: %s", err) - os.Exit(1) - } - - // Start all tests - code := 
m.Run() - - runtime.RemoveFiles(wasmFilePaths) - os.Exit(code) -} - func TestAuthorModule_Pending_Integration(t *testing.T) { t.Parallel() integrationTestController := setupStateAndRuntime(t, t.TempDir(), nil) diff --git a/dot/rpc/subscription/listeners_test.go b/dot/rpc/subscription/listeners_test.go index 2e8a2f44e9..6576216e91 100644 --- a/dot/rpc/subscription/listeners_test.go +++ b/dot/rpc/subscription/listeners_test.go @@ -4,13 +4,13 @@ package subscription import ( + "context" "encoding/json" "fmt" "log" "net/http" "net/http/httptest" "os" - "path/filepath" "strings" "testing" "time" @@ -352,11 +352,9 @@ func TestRuntimeChannelListener_Listen(t *testing.T) { expectedInitialResponse.Params.Result = expectedInitialVersion instance := wasmer.NewTestInstance(t, runtime.NODE_RUNTIME) - err := runtime.GetRuntimeBlob(runtime.POLKADOT_RUNTIME_FP, runtime.POLKADOT_RUNTIME_URL) + polkadotRuntimeFilepath, err := runtime.GetRuntime(context.Background(), runtime.POLKADOT_RUNTIME) require.NoError(t, err) - fp, err := filepath.Abs(runtime.POLKADOT_RUNTIME_FP) - require.NoError(t, err) - code, err := os.ReadFile(fp) + code, err := os.ReadFile(polkadotRuntimeFilepath) require.NoError(t, err) version, err := instance.CheckRuntimeVersion(code) require.NoError(t, err) diff --git a/dot/sync/syncer_integeration_test.go b/dot/sync/syncer_integeration_test.go index aac17d73e3..e495534d11 100644 --- a/dot/sync/syncer_integeration_test.go +++ b/dot/sync/syncer_integeration_test.go @@ -8,7 +8,6 @@ package sync import ( "errors" - "os" "path/filepath" "testing" @@ -28,20 +27,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestMain(m *testing.M) { - wasmFilePaths, err := runtime.GenerateRuntimeWasmFile() - if err != nil { - log.Errorf("failed to generate runtime wasm file: %s", err) - os.Exit(1) - } - - // Start all tests - code := m.Run() - - runtime.RemoveFiles(wasmFilePaths) - os.Exit(code) -} - func newMockFinalityGadget() *mocks.FinalityGadget { m := new(mocks.FinalityGadget) // using []uint8 instead of []byte: https://github.com/stretchr/testify/pull/969 diff --git a/lib/babe/babe_integration_test.go b/lib/babe/babe_integration_test.go index 1037eda302..df93914d12 100644 --- a/lib/babe/babe_integration_test.go +++ b/lib/babe/babe_integration_test.go @@ -6,7 +6,6 @@ package babe import ( - "os" "path/filepath" "testing" "time" @@ -153,22 +152,6 @@ func createTestService(t *testing.T, cfg *ServiceConfig) *Service { return babeService } -func TestMain(m *testing.M) { - wasmFilePaths, err := runtime.GenerateRuntimeWasmFile() - if err != nil { - log.Errorf("failed to generate runtime wasm file: %s", err) - os.Exit(1) - } - - logger = log.NewFromGlobal(log.SetLevel(defaultTestLogLvl)) - - // Start all tests - code := m.Run() - - runtime.RemoveFiles(wasmFilePaths) - os.Exit(code) -} - func newTestServiceSetupParameters(t *testing.T) (*Service, *state.EpochState, *types.BabeConfiguration) { ctrl := gomock.NewController(t) telemetryMock := NewMockClient(ctrl) diff --git a/lib/runtime/constants.go b/lib/runtime/constants.go index 378aacb556..675d4952f8 100644 --- a/lib/runtime/constants.go +++ b/lib/runtime/constants.go @@ -7,8 +7,6 @@ import ( "github.com/ChainSafe/gossamer/lib/common" ) -var runtimes = []string{HOST_API_TEST_RUNTIME, POLKADOT_RUNTIME, POLKADOT_RUNTIME_v0917, NODE_RUNTIME, DEV_RUNTIME} - //nolint:revive const ( // v0.9 substrate runtime diff --git a/lib/runtime/life/test_helpers.go b/lib/runtime/life/test_helpers.go index b0a521b8e6..2ed1340b63 100644 --- 
a/lib/runtime/life/test_helpers.go
+++ b/lib/runtime/life/test_helpers.go
@@ -4,7 +4,7 @@
 package life
 
 import (
-	"path/filepath"
+	"context"
 	"testing"
 
 	"github.com/ChainSafe/gossamer/internal/log"
@@ -20,30 +20,24 @@ var DefaultTestLogLvl = log.Info
 
 // newTestInstance will create a new runtime instance using the given target runtime
 func newTestInstance(t *testing.T, targetRuntime string) *Instance {
-	return newTestInstanceWithTrie(t, runtime.HOST_API_TEST_RUNTIME, nil, DefaultTestLogLvl)
+	return newTestInstanceWithTrie(t, targetRuntime, nil, DefaultTestLogLvl)
 }
 
 // newTestInstanceWithTrie will create a new runtime instance with the supplied trie as the storage
 func newTestInstanceWithTrie(t *testing.T, targetRuntime string, tt *trie.Trie, lvl log.Level) *Instance {
-	fp, cfg := setupConfig(t, targetRuntime, tt, lvl, 0)
-	r, err := NewInstanceFromFile(fp, cfg)
+	testRuntimeFilePath, err := runtime.GetRuntime(context.Background(), targetRuntime)
+	require.NoError(t, err)
+	cfg := setupConfig(t, tt, lvl, 0)
+	r, err := NewInstanceFromFile(testRuntimeFilePath, cfg)
 	require.NoError(t, err, "Got error when trying to create new VM", "targetRuntime", targetRuntime)
 	require.NotNil(t, r, "Could not create new VM instance", "targetRuntime", targetRuntime)
 	return r
 }
 
-func setupConfig(t *testing.T, targetRuntime string, tt *trie.Trie, lvl log.Level, role byte) (string, *Config) {
-	testRuntimeFilePath, testRuntimeURL := runtime.GetRuntimeVars(targetRuntime)
-
-	err := runtime.GetRuntimeBlob(testRuntimeFilePath, testRuntimeURL)
-	require.Nil(t, err, "Fail: could not get runtime", "targetRuntime", targetRuntime)
-
+func setupConfig(t *testing.T, tt *trie.Trie, lvl log.Level, role byte) *Config {
 	s, err := storage.NewTrieState(tt)
 	require.NoError(t, err)
 
-	fp, err := filepath.Abs(testRuntimeFilePath)
-	require.Nil(t, err, "could not create testRuntimeFilePath", "targetRuntime", targetRuntime)
-
 	ns := runtime.NodeStorage{
 		LocalStorage:      runtime.NewInMemoryDB(t),
 		PersistentStorage: runtime.NewInMemoryDB(t), // we're using a local storage here since this is a test runtime
@@ -56,5 +50,5 @@
 	cfg.Network = new(runtime.TestRuntimeNetwork)
 	cfg.Role = role
 	cfg.Resolver = new(Resolver)
-	return fp, cfg
+	return cfg
 }
diff --git a/lib/runtime/test_helpers.go b/lib/runtime/test_helpers.go
index 8d437fa791..ac8c706986 100644
--- a/lib/runtime/test_helpers.go
+++ b/lib/runtime/test_helpers.go
@@ -5,10 +5,13 @@ package runtime
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"net/http"
 	"os"
 	"path"
+	"path/filepath"
 	"testing"
 	"time"
 
@@ -42,66 +45,115 @@ func NewInMemoryDB(t *testing.T) chaindb.Database {
 	return db
 }
 
-// GetRuntimeVars returns the testRuntimeFilePath and testRuntimeURL
-func GetRuntimeVars(targetRuntime string) (string, string) {
-	switch targetRuntime {
+var (
+	ErrRuntimeUnknown  = errors.New("runtime is not known")
+	ErrHTTPStatusNotOK = errors.New("HTTP status code received is not OK")
+	ErrOpenRuntimeFile = errors.New("cannot open the runtime target file")
+)
+
+// GetRuntime returns the runtime file path located in the
+// /tmp/gossamer/runtimes directory (depending on OS and environment).
+// If the file does not exist, the runtime WASM blob is downloaded to that file.
+func GetRuntime(ctx context.Context, runtime string) ( + runtimePath string, err error) { + basePath := filepath.Join(os.TempDir(), "/gossamer/runtimes/") + const perm = os.FileMode(0777) + err = os.MkdirAll(basePath, perm) + if err != nil { + return "", fmt.Errorf("cannot create directory for runtimes: %w", err) + } + + var runtimeFilename, url string + switch runtime { case NODE_RUNTIME: - return GetAbsolutePath(NODE_RUNTIME_FP), NODE_RUNTIME_URL + runtimeFilename = NODE_RUNTIME_FP + url = NODE_RUNTIME_URL case NODE_RUNTIME_v098: - return GetAbsolutePath(NODE_RUNTIME_FP_v098), NODE_RUNTIME_URL_v098 + runtimeFilename = NODE_RUNTIME_FP_v098 + url = NODE_RUNTIME_URL_v098 case POLKADOT_RUNTIME_v0917: - return GetAbsolutePath(POLKADOT_RUNTIME_FP_v0917), POLKADOT_RUNTIME_URL_v0917 + runtimeFilename = POLKADOT_RUNTIME_FP_v0917 + url = POLKADOT_RUNTIME_URL_v0917 case POLKADOT_RUNTIME_v0910: - return GetAbsolutePath(POLKADOT_RUNTIME_FP_v0910), POLKADOT_RUNTIME_URL_v0910 + runtimeFilename = POLKADOT_RUNTIME_FP_v0910 + url = POLKADOT_RUNTIME_URL_v0910 case POLKADOT_RUNTIME: - return GetAbsolutePath(POLKADOT_RUNTIME_FP), POLKADOT_RUNTIME_URL + runtimeFilename = POLKADOT_RUNTIME_FP + url = POLKADOT_RUNTIME_URL case HOST_API_TEST_RUNTIME: - return GetAbsolutePath(HOST_API_TEST_RUNTIME_FP), HOST_API_TEST_RUNTIME_URL + runtimeFilename = HOST_API_TEST_RUNTIME_FP + url = HOST_API_TEST_RUNTIME_URL case DEV_RUNTIME: - return GetAbsolutePath(DEV_RUNTIME_FP), DEV_RUNTIME_URL + runtimeFilename = DEV_RUNTIME_FP + url = DEV_RUNTIME_URL default: - return "", "" + return "", fmt.Errorf("%w: %s", ErrRuntimeUnknown, runtime) } -} -// GetAbsolutePath returns the completePath for a given targetDir -func GetAbsolutePath(targetDir string) string { - dir, err := os.Getwd() + runtimePath = filepath.Join(basePath, runtimeFilename) + runtimePath, err = filepath.Abs(runtimePath) if err != nil { - panic("failed to get current working directory") + return "", fmt.Errorf("malformed relative path: %w", err) } - return path.Join(dir, targetDir) -} -// GetRuntimeBlob checks if the test wasm @testRuntimeFilePath exists and if not, it fetches it from @testRuntimeURL -func GetRuntimeBlob(testRuntimeFilePath, testRuntimeURL string) error { - if utils.PathExists(testRuntimeFilePath) { - return nil + if utils.PathExists(runtimePath) { + return runtimePath, nil } - ctx, cancel := context.WithCancel(context.Background()) + const requestTimeout = 10 * time.Second + ctx, cancel := context.WithTimeout(ctx, requestTimeout) defer cancel() - req, err := http.NewRequestWithContext(ctx, http.MethodGet, testRuntimeURL, nil) + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("cannot make HTTP request: %w", err) + } + + response, err := http.DefaultClient.Do(request) + if err != nil { + return "", fmt.Errorf("cannot get: %w", err) + } + + if response.StatusCode != http.StatusOK { + _ = response.Body.Close() + return "", fmt.Errorf("%w: %d %s", ErrHTTPStatusNotOK, + response.StatusCode, response.Status) + } + + const flag = os.O_TRUNC | os.O_CREATE | os.O_WRONLY + file, err := os.OpenFile(runtimePath, flag, perm) //nolint:gosec if err != nil { - return err + _ = response.Body.Close() + return "", fmt.Errorf("cannot open target destination file: %w", err) } - const runtimeReqTimout = time.Second * 30 + _, err = io.Copy(file, response.Body) + if err != nil { + _ = response.Body.Close() + return "", fmt.Errorf("cannot copy response body to %s: %w", + runtimePath, err) + } - httpcli := 
http.Client{Timeout: runtimeReqTimout} - resp, err := httpcli.Do(req) + err = file.Close() if err != nil { - return err + return "", fmt.Errorf("cannot close file: %w", err) } - respBody, err := io.ReadAll(resp.Body) + err = response.Body.Close() if err != nil { - return err + return "", fmt.Errorf("cannot close HTTP response body: %w", err) } - defer resp.Body.Close() //nolint:errcheck - return os.WriteFile(testRuntimeFilePath, respBody, os.ModePerm) + return runtimePath, nil +} + +// GetAbsolutePath returns the completePath for a given targetDir +func GetAbsolutePath(targetDir string) string { + dir, err := os.Getwd() + if err != nil { + panic("failed to get current working directory") + } + return path.Join(dir, targetDir) } // TestRuntimeNetwork ... @@ -143,32 +195,6 @@ func generateEd25519Signatures(t *testing.T, n int) []*crypto.SignatureInfo { return signs } -// GenerateRuntimeWasmFile generates all runtime wasm files. -func GenerateRuntimeWasmFile() ([]string, error) { - var wasmFilePaths []string - for _, rt := range runtimes { - testRuntimeFilePath, testRuntimeURL := GetRuntimeVars(rt) - err := GetRuntimeBlob(testRuntimeFilePath, testRuntimeURL) - if err != nil { - return nil, err - } - - wasmFilePaths = append(wasmFilePaths, testRuntimeFilePath) - } - return wasmFilePaths, nil -} - -// RemoveFiles removes multiple files. -func RemoveFiles(files []string) error { - for _, file := range files { - err := os.Remove(file) - if err != nil { - return err - } - } - return nil -} - // NewTestExtrinsic builds a new extrinsic using centrifuge pkg func NewTestExtrinsic(t *testing.T, rt Instance, genHash, blockHash common.Hash, nonce uint64, call string, args ...interface{}) string { diff --git a/lib/runtime/wasmer/imports_test.go b/lib/runtime/wasmer/imports_test.go index bc4b93257e..12b5ec5397 100644 --- a/lib/runtime/wasmer/imports_test.go +++ b/lib/runtime/wasmer/imports_test.go @@ -7,13 +7,11 @@ import ( "bytes" "encoding/binary" "net/http" - "os" "sort" "testing" "time" "github.com/ChainSafe/chaindb" - "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/types" "github.com/ChainSafe/gossamer/lib/crypto" @@ -34,20 +32,6 @@ var testChildKey = []byte("childKey") var testKey = []byte("key") var testValue = []byte("value") -func TestMain(m *testing.M) { - wasmFilePaths, err := runtime.GenerateRuntimeWasmFile() - if err != nil { - log.Errorf("failed to generate runtime wasm file: %s", err) - os.Exit(1) - } - - // Start all tests - code := m.Run() - - runtime.RemoveFiles(wasmFilePaths) - os.Exit(code) -} - func Test_ext_offchain_timestamp_version_1(t *testing.T) { inst := NewTestInstance(t, runtime.HOST_API_TEST_RUNTIME) runtimeFunc, ok := inst.vm.Exports["rtm_ext_offchain_timestamp_version_1"] diff --git a/lib/runtime/wasmer/instance_test.go b/lib/runtime/wasmer/instance_test.go index 89b95d70eb..97b50c3bc5 100644 --- a/lib/runtime/wasmer/instance_test.go +++ b/lib/runtime/wasmer/instance_test.go @@ -4,8 +4,8 @@ package wasmer import ( + "context" "os" - "path/filepath" "testing" "github.com/ChainSafe/gossamer/lib/runtime" @@ -38,11 +38,10 @@ func TestPointerSize(t *testing.T) { func TestInstance_CheckRuntimeVersion(t *testing.T) { instance := NewTestInstance(t, runtime.NODE_RUNTIME) - err := runtime.GetRuntimeBlob(runtime.POLKADOT_RUNTIME_FP, runtime.POLKADOT_RUNTIME_URL) + polkadotRuntimeFilepath, err := runtime.GetRuntime( + context.Background(), runtime.POLKADOT_RUNTIME) require.NoError(t, err) - fp, err := 
filepath.Abs(runtime.POLKADOT_RUNTIME_FP) - require.NoError(t, err) - code, err := os.ReadFile(fp) + code, err := os.ReadFile(polkadotRuntimeFilepath) require.NoError(t, err) version, err := instance.CheckRuntimeVersion(code) require.NoError(t, err) diff --git a/lib/runtime/wasmer/test_helpers.go b/lib/runtime/wasmer/test_helpers.go index ab1f07458c..4ac3463ed3 100644 --- a/lib/runtime/wasmer/test_helpers.go +++ b/lib/runtime/wasmer/test_helpers.go @@ -4,7 +4,7 @@ package wasmer import ( - "path/filepath" + "context" "testing" "github.com/ChainSafe/gossamer/internal/log" @@ -23,30 +23,30 @@ var DefaultTestLogLvl = log.Info // NewTestInstance will create a new runtime instance using the given target runtime func NewTestInstance(t *testing.T, targetRuntime string) *Instance { + t.Helper() return NewTestInstanceWithTrie(t, targetRuntime, nil) } // NewTestInstanceWithTrie will create a new runtime (polkadot/test) with the supplied trie as the storage func NewTestInstanceWithTrie(t *testing.T, targetRuntime string, tt *trie.Trie) *Instance { - fp, cfg := setupConfig(t, targetRuntime, tt, DefaultTestLogLvl, 0) - r, err := NewInstanceFromFile(fp, cfg) + t.Helper() + + cfg := setupConfig(t, tt, DefaultTestLogLvl, 0) + runtimeFilepath, err := runtime.GetRuntime(context.Background(), targetRuntime) + require.NoError(t, err) + + r, err := NewInstanceFromFile(runtimeFilepath, cfg) require.NoError(t, err, "Got error when trying to create new VM", "targetRuntime", targetRuntime) require.NotNil(t, r, "Could not create new VM instance", "targetRuntime", targetRuntime) return r } -func setupConfig(t *testing.T, targetRuntime string, tt *trie.Trie, lvl log.Level, role byte) (string, *Config) { - testRuntimeFilePath, testRuntimeURL := runtime.GetRuntimeVars(targetRuntime) - - err := runtime.GetRuntimeBlob(testRuntimeFilePath, testRuntimeURL) - require.Nil(t, err, "Fail: could not get runtime", "targetRuntime", targetRuntime) +func setupConfig(t *testing.T, tt *trie.Trie, lvl log.Level, role byte) *Config { + t.Helper() s, err := storage.NewTrieState(tt) require.NoError(t, err) - fp, err := filepath.Abs(testRuntimeFilePath) - require.Nil(t, err, "could not create testRuntimeFilePath", "targetRuntime", targetRuntime) - ns := runtime.NodeStorage{ LocalStorage: runtime.NewInMemoryDB(t), PersistentStorage: runtime.NewInMemoryDB(t), // we're using a local storage here since this is a test runtime @@ -62,7 +62,7 @@ func setupConfig(t *testing.T, targetRuntime string, tt *trie.Trie, lvl log.Leve cfg.Network = new(runtime.TestRuntimeNetwork) cfg.Transaction = newTransactionStateMock() cfg.Role = role - return fp, cfg + return cfg } // NewTransactionStateMock create and return an runtime Transaction State interface mock From 2fa5d8a69e2dc6fcf293f06841a376a79134b0c4 Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Fri, 10 Jun 2022 13:48:07 -0400 Subject: [PATCH 04/48] fix(dot/peerset): fix sending on closed channel race condition when dropping peer (#2573) * set channel to nil on close, check for nil chan * refactor parallel goroutines to single goroutine * cr feedback * bump to 45m * bump up integration to 45m --- .github/workflows/integration-tests.yml | 2 +- .github/workflows/unit-tests.yml | 2 +- dot/peerset/peerset.go | 42 ++++++++++--------------- 3 files changed, 18 insertions(+), 28 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 80e138ef09..674099c7c4 100644 --- a/.github/workflows/integration-tests.yml +++ 
b/.github/workflows/integration-tests.yml @@ -60,4 +60,4 @@ jobs: restore-keys: ${{ runner.os }}-go-mod - name: Run integration tests - run: go test -timeout=30m -tags integration ${{ matrix.packages }} + run: go test -timeout=45m -tags integration ${{ matrix.packages }} diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 9924289545..0132ee7cc0 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -67,7 +67,7 @@ jobs: echo "$HOME/.local/bin" >> $GITHUB_PATH - name: Run unit tests - run: go test -short -coverprofile=coverage.out -covermode=atomic -timeout=30m ./... + run: go test -short -coverprofile=coverage.out -covermode=atomic -timeout=45m ./... - name: Test State - Race run: make test-state-race diff --git a/dot/peerset/peerset.go b/dot/peerset/peerset.go index df19ec2d51..dea6956f90 100644 --- a/dot/peerset/peerset.go +++ b/dot/peerset/peerset.go @@ -712,15 +712,28 @@ func (ps *PeerSet) start(ctx context.Context, actionQueue chan action) { ps.actionQueue = actionQueue ps.resultMsgCh = make(chan Message, msgChanSize) - go ps.listenAction(ctx) - go ps.periodicallyAllocateSlots(ctx) + go ps.listenActionAllocSlots(ctx) } -func (ps *PeerSet) listenAction(ctx context.Context) { +func (ps *PeerSet) listenActionAllocSlots(ctx context.Context) { + ticker := time.NewTicker(ps.nextPeriodicAllocSlots) + + defer func() { + ticker.Stop() + close(ps.resultMsgCh) + }() + for { select { case <-ctx.Done(): + logger.Debugf("peerset slot allocation exiting: %s", ctx.Err()) return + case <-ticker.C: + for setID := 0; setID < ps.peerState.getSetLength(); setID++ { + if err := ps.allocSlots(setID); err != nil { + logger.Warnf("failed to allocate slots: %s", err) + } + } case act, ok := <-ps.actionQueue: if !ok { return @@ -758,26 +771,3 @@ func (ps *PeerSet) listenAction(ctx context.Context) { } } } - -func (ps *PeerSet) periodicallyAllocateSlots(ctx context.Context) { - ticker := time.NewTicker(ps.nextPeriodicAllocSlots) - - defer func() { - ticker.Stop() - close(ps.resultMsgCh) - }() - - for { - select { - case <-ctx.Done(): - logger.Debugf("peerset slot allocation exiting: %s", ctx.Err()) - return - case <-ticker.C: - for setID := 0; setID < ps.peerState.getSetLength(); setID++ { - if err := ps.allocSlots(setID); err != nil { - logger.Warnf("failed to allocate slots: %s", err) - } - } - } - } -} From 62204027a47ec40b2c9e9ecf6529018a06e244d1 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Fri, 10 Jun 2022 17:10:53 -0400 Subject: [PATCH 05/48] chore(end-to-end tests): refactor and fix flaky tests (#2470) - make RPC helper functions not test aware - `waitForNode` context based function - Use `t.TempDir()` for nodes base paths - Add `GenerateGenesisAuths` helper function - Remove global variables - `GenesisDefault`, `GenesisDev`, `GenesisTwoAuthsSecondaryVRF0_9_10` - framework variable - configuration variables - `HOSTNAME` variable - `maxTries` in stress tests - `testTimeout` in stress tests - Write node configurations to `t.TempDir()` per test - Remove `DecodeRPC_NT` function - Split `tests/utils` package - `tests/utils/rpc` package - `tests/utils/websocket` package - `tests/utils/config` package - Rework start and stop of nodes - Init nodes in parallel - Remove unneeded logs - Log when node is ready - Prefix writer with node string for multiple nodes - Wait for nodes after starting all nodes - Add test writer using `t.Logf` - Refactor `getResponse` - not test aware - get passed target interface - push test assertion/skip to actual 
test
- Add helper `retry.UntilNoError` and `retry.UntilOK` functions
  - Use in `compareBlocksByNumber`
  - Use in `waitForNode`
- Change `GetBlockHash` to not retry RPC
- Remove `PostWithRetry`
- Replace `compareFinalizedHeadsWithRetry` with `retry.UntilOK`
- Refactor and fix `TestSystemRPC`
  - Add empty skipped tests for missing cases
  - Split each subtest individually
  - Keep on retrying until the main context is canceled
  - Fix `networkState` test case
  - Assert more fields
  - Fix #2161 and #807
- Refactor and fix `TestSync_SingleBlockProducer`
- Refactor and make faster `TestAuthorSubmitExtrinsic`
- Configure nodes using the TOML Go struct only
  - Pass `--no-telemetry` as a flag since it's not in the TOML config
  - `dot`: remove `exportConfig` and `TestExportConfig`
  - `dot`: remove `ExportTomlConfig` and its test
- Install NPM dependencies from Go code for the polkadot JS test
- Remove `websocket` package
  - Use `Decode` from the rpc package instead
- Add fields to the RPC server response struct
  - Do not use `reflect`
- Add `fetchWithTimeout` helper function
- Change test case slices to individual subtests
- Add `t.SkipNow()` to parent tests where all subtests are skipped
- Make `TestChainRPC` event driven and increase testing depth
  - Add a few `TODO`s to increase testing depth
  - Run each query call in parallel
- Refactor `TestChainSubscriptionRPC`
  - Refactor websocket code
  - Assert things and increase test depth
  - Graceful shutdown of websocket
- Merge stable workflow into rpc workflow
  - Use Go to implement `integration-test-all.sh` directly in a single 'stable' test
  - The test is fast and only does RPC calls, so it should be part of the RPC workflow
  - Remove stable workflow from the Makefile, github workflows directory and documentation
- Log out the command, logs and configuration file content on runtime error
- Fixes directly related issues #2389, #2390, #2391
- Fixes flaky tests issues #807, #2161, #2165, #2167

Co-authored-by: Timothy Wu
---
 .github/workflows/docker-stable.yml           |  33 -
 .gitignore                                    |   3 -
 Dockerfile                                    |   5 -
 Makefile                                      |  16 +-
 docs/docs/testing-and-debugging/test-suite.md |   7 -
 dot/utils.go                                  |  28 -
 dot/utils_test.go                             | 149 -----
 lib/utils/utils.go                            |   5 +
 scripts/integration-test-all.sh               | 112 ----
 .../polkadotjs_test/start_polkadotjs_test.go  |  38 +-
 tests/rpc/rpc_00_test.go                      |  52 +-
 tests/rpc/rpc_01-system_test.go               | 271 +++++----
 tests/rpc/rpc_02-author_test.go               | 160 ++---
 tests/rpc/rpc_03-chain_test.go                | 509 +++++++++-------
 tests/rpc/rpc_04-offchain_test.go             |  72 +--
 tests/rpc/rpc_05-state_test.go                | 290 +++++----
 tests/rpc/rpc_06-engine_test.go               |  55 +-
 tests/rpc/rpc_07-payment_test.go              |  42 +-
 tests/rpc/rpc_08-contracts_test.go            |  48 +-
 tests/rpc/rpc_09-babe_test.go                 |  42 +-
 tests/rpc/system_integration_test.go          | 147 +++--
 tests/stress/grandpa_test.go                  | 170 +++---
 tests/stress/helpers.go                       | 173 ++----
 tests/stress/network_test.go                  |  27 +-
 tests/stress/stress_test.go                   | 490 ++++++++-------
 tests/sync/sync_test.go                       |  47 +-
 tests/utils/chain.go                          | 138 -----
 tests/utils/common.go                         |  51 --
 tests/utils/config/config.go                  |  51 ++
 tests/utils/config/default.go                 |  52 ++
 tests/utils/config/write.go                   |  27 +
 tests/utils/dev.go                            |  62 --
 tests/utils/framework.go                      |  51 +-
 tests/utils/gossamer_utils.go                 | 566 +-----------------
 tests/utils/header.go                         |  33 -
 tests/utils/node/errors.go                    | 156 +++++
 tests/utils/node/node.go                      | 289 +++++++++
 tests/utils/node/node_test.go                 |  28 +
 tests/utils/node/nodes.go                     | 176 ++++++
 tests/utils/node/options.go                   |  23 +
 tests/utils/node/waitnode.go                  |  47 ++
 tests/utils/node/writer.go                    |  28 +
 tests/utils/node/writer_test.go               |  38 ++
tests/utils/pathfinder/gossamer.go | 22 + tests/utils/request_utils.go | 185 ------ tests/utils/retry/common.go | 28 + tests/utils/retry/untilnoerror.go | 29 + tests/utils/retry/untilok.go | 33 + tests/utils/rpc/chain.go | 131 ++++ tests/utils/rpc/dev.go | 76 +++ tests/utils/rpc/header.go | 49 ++ tests/utils/rpc/request.go | 123 ++++ tests/utils/rpc/system.go | 49 ++ tests/utils/rpc/types.go | 30 + tests/utils/rpc_methods.go | 27 - tests/utils/system.go | 29 - tests/utils/writer.go | 30 + 57 files changed, 2965 insertions(+), 2683 deletions(-) delete mode 100644 .github/workflows/docker-stable.yml delete mode 100755 scripts/integration-test-all.sh delete mode 100644 tests/utils/chain.go create mode 100644 tests/utils/config/config.go create mode 100644 tests/utils/config/default.go create mode 100644 tests/utils/config/write.go delete mode 100644 tests/utils/dev.go delete mode 100644 tests/utils/header.go create mode 100644 tests/utils/node/errors.go create mode 100644 tests/utils/node/node.go create mode 100644 tests/utils/node/node_test.go create mode 100644 tests/utils/node/nodes.go create mode 100644 tests/utils/node/options.go create mode 100644 tests/utils/node/waitnode.go create mode 100644 tests/utils/node/writer.go create mode 100644 tests/utils/node/writer_test.go create mode 100644 tests/utils/pathfinder/gossamer.go delete mode 100644 tests/utils/request_utils.go create mode 100644 tests/utils/retry/common.go create mode 100644 tests/utils/retry/untilnoerror.go create mode 100644 tests/utils/retry/untilok.go create mode 100644 tests/utils/rpc/chain.go create mode 100644 tests/utils/rpc/dev.go create mode 100644 tests/utils/rpc/header.go create mode 100644 tests/utils/rpc/request.go create mode 100644 tests/utils/rpc/system.go create mode 100644 tests/utils/rpc/types.go delete mode 100644 tests/utils/rpc_methods.go delete mode 100644 tests/utils/system.go create mode 100644 tests/utils/writer.go diff --git a/.github/workflows/docker-stable.yml b/.github/workflows/docker-stable.yml deleted file mode 100644 index 29f88b4be8..0000000000 --- a/.github/workflows/docker-stable.yml +++ /dev/null @@ -1,33 +0,0 @@ -on: - pull_request: - # Commented paths to avoid skipping required workflow - # See https://github.community/t/feature-request-conditional-required-checks/16761 - # paths: - # - .github/workflows/docker-stable.yml - # - "**/*.go" - # - "chain/**" - # - "cmd/**" - # - "dot/**" - # - "internal/**" - # - "lib/**" - # - "pkg/**" - # - scripts/integration-test-all.sh - # - go.mod - # - go.sum -name: docker-stable - -jobs: - docker-stable-tests: - runs-on: ubuntu-latest - env: - DOCKER_BUILDKIT: "1" - steps: - - uses: docker/build-push-action@v3 - with: - load: true - target: builder - tags: chainsafe/gossamer:test - - - name: Run stable tests - run: | - docker run chainsafe/gossamer:test sh -c "make it-stable" diff --git a/.gitignore b/.gitignore index 8995c9de36..d5d719704f 100644 --- a/.gitignore +++ b/.gitignore @@ -23,9 +23,6 @@ test_data trie_putandget_failed_test_data_* tmp -tests/utils/config* -tests/utils/genesis* - # node_modules used by polkadot.js/api tests tests/polkadotjs_test/node_modules !tests/polkadotjs_test/test/*.wasm diff --git a/Dockerfile b/Dockerfile index 2eb2c33cc8..c3e678117f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,11 +17,6 @@ RUN wget -qO- https://deb.nodesource.com/setup_14.x | bash - && \ RUN wget -O /usr/local/bin/subkey https://chainbridge.ams3.digitaloceanspaces.com/subkey-v2.0.0 && \ chmod +x /usr/local/bin/subkey -# Polkadot JS dependencies 
-WORKDIR /go/src/github.com/ChainSafe/gossamer/tests/polkadotjs_test -COPY tests/polkadotjs_test/package.json tests/polkadotjs_test/package-lock.json ./ -RUN npm install - WORKDIR /go/src/github.com/ChainSafe/gossamer # Go dependencies diff --git a/Makefile b/Makefile index 40926cb548..fef16e8427 100644 --- a/Makefile +++ b/Makefile @@ -36,32 +36,26 @@ test: git lfs pull go test -short -coverprofile c.out ./... -timeout=30m -## it-stable: Runs Integration Tests Stable mode -it-stable: - @echo " > \033[32mRunning Integration Tests...\033[0m " - @chmod +x scripts/integration-test-all.sh - ./scripts/integration-test-all.sh -q 3 -s 10 - ## it-stress: Runs Integration Tests stress mode it-stress: build @echo " > \033[32mRunning stress tests...\033[0m " - HOSTNAME=0.0.0.0 MODE=stress go test ./tests/stress/... -timeout=15m -v -short -run TestSync_ + MODE=stress go test ./tests/stress/... -timeout=15m -v -short -run TestSync_ it-grandpa: build @echo " > \033[32mRunning GRANDPA stress tests...\033[0m " - HOSTNAME=0.0.0.0 MODE=stress go test ./tests/stress/... -timeout=12m -v -short -run TestStress_Grandpa_ + MODE=stress go test ./tests/stress/... -timeout=12m -v -short -run TestStress_Grandpa_ it-rpc: build @echo " > \033[32mRunning Integration Tests RPC Specs mode...\033[0m " - HOSTNAME=0.0.0.0 MODE=rpc go test ./tests/rpc/... -timeout=10m -v + MODE=rpc go test ./tests/rpc/... -timeout=10m -v it-sync: build @echo " > \033[32mRunning Integration Tests sync mode...\033[0m " - HOSTNAME=0.0.0.0 MODE=sync go test ./tests/sync/... -timeout=5m -v + MODE=sync go test ./tests/sync/... -timeout=5m -v it-polkadotjs: build @echo " > \033[32mRunning Integration Tests polkadot.js/api mode...\033[0m " - HOSTNAME=0.0.0.0 MODE=polkadot go test ./tests/polkadotjs_test/... -timeout=5m -v + MODE=polkadot go test ./tests/polkadotjs_test/... -timeout=5m -v ## test: Runs `go test -race` on project test files. test-state-race: diff --git a/docs/docs/testing-and-debugging/test-suite.md b/docs/docs/testing-and-debugging/test-suite.md index 5e5c7ab31b..ca6a3fb6e6 100644 --- a/docs/docs/testing-and-debugging/test-suite.md +++ b/docs/docs/testing-and-debugging/test-suite.md @@ -25,13 +25,6 @@ Proceed to open `cover.html` in your preferred browser. ### Gossamer Integration Tests Running Gossamer's integration tests with the below commands will build a Gossamer binary, install required dependencies, and then proceeds to run the provided set of tests. Integration tests can also be run within a docker container. 
- - -To run Gossamer integration tests in **stable** mode run the following command: - -``` -make it-stable -``` To run Gossamer integration tests in **stress** mode run the following command: diff --git a/dot/utils.go b/dot/utils.go index 1748c7c9dd..596fe397d7 100644 --- a/dot/utils.go +++ b/dot/utils.go @@ -9,37 +9,9 @@ import ( "os" "strings" - ctoml "github.com/ChainSafe/gossamer/dot/config/toml" "github.com/cosmos/go-bip39" - "github.com/naoina/toml" ) -// exportConfig exports a dot configuration to a toml configuration file -func exportConfig(cfg *Config, fp string) { - raw, err := toml.Marshal(*cfg) - if err != nil { - logger.Errorf("failed to marshal configuration: %s", err) - os.Exit(1) - } - if err := os.WriteFile(fp, raw, 0600); err != nil { - logger.Errorf("failed to write file: %s", err) - os.Exit(1) - } -} - -// ExportTomlConfig exports a dot configuration to a toml configuration file -func ExportTomlConfig(cfg *ctoml.Config, fp string) { - raw, err := toml.Marshal(*cfg) - if err != nil { - logger.Errorf("failed to marshal configuration: %s", err) - os.Exit(1) - } - if err := os.WriteFile(fp, raw, 0600); err != nil { - logger.Errorf("failed to write file: %s", err) - os.Exit(1) - } -} - // CreateJSONRawFile will generate a JSON genesis file with raw storage func CreateJSONRawFile(bs *BuildSpec, fp string) { data, err := bs.ToJSONRaw() diff --git a/dot/utils_test.go b/dot/utils_test.go index 638d003c21..38c7c8c01a 100644 --- a/dot/utils_test.go +++ b/dot/utils_test.go @@ -12,7 +12,6 @@ import ( "path/filepath" "testing" - ctoml "github.com/ChainSafe/gossamer/dot/config/toml" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/genesis" @@ -80,154 +79,6 @@ func TestCreateJSONRawFile(t *testing.T) { } } -func TestExportConfig(t *testing.T) { - filepath := filepath.Join(t.TempDir(), "test.json") - type args struct { - cfg *Config - fp string - } - tests := []struct { - name string - args args - want *os.File - wantedContent string - }{ - { - name: "working example", - args: args{ - cfg: &Config{}, - fp: filepath, - }, - want: &os.File{}, - wantedContent: `[global] -name = "" -id = "" -base_path = "" -log_lvl = 0 -publish_metrics = false -metrics_address = "" -no_telemetry = false -telemetry_urls = [] -retain_blocks = 0 -pruning = "" - -[log] -core_lvl = 0 -digest_lvl = 0 -sync_lvl = 0 -network_lvl = 0 -rpc_lvl = 0 -state_lvl = 0 -runtime_lvl = 0 -block_producer_lvl = 0 -finality_gadget_lvl = 0 - -[init] -genesis = "" - -[account] -key = "" -unlock = "" - -[core] -roles = 0 -babe_authority = false -b_a_b_e_lead = false -grandpa_authority = false -wasm_interpreter = "" -grandpa_interval = 0 - -[network] -port = 0 -bootnodes = [] -protocol_id = "" -no_bootstrap = false -no_m_dns = false -min_peers = 0 -max_peers = 0 -persistent_peers = [] -discovery_interval = 0 -public_ip = "" -public_dns = "" - -[rpc] -enabled = false -external = false -unsafe = false -unsafe_external = false -port = 0 -host = "" -modules = [] -w_s_port = 0 -w_s = false -w_s_external = false -w_s_unsafe = false -w_s_unsafe_external = false - -[system] -system_name = "" -system_version = "" - -[state] -rewind = 0 - -[pprof] -enabled = false - -[pprof.settings] -listening_address = "" -block_profile_rate = 0 -mutex_profile_rate = 0 -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - exportConfig(tt.args.cfg, tt.args.fp) - - content, err := ioutil.ReadFile(tt.args.fp) - require.NoError(t, err) - require.Equal(t, 
tt.wantedContent, string(content)) - - }) - } -} - -func TestExportTomlConfig(t *testing.T) { - filepath := filepath.Join(t.TempDir(), "test.json") - type args struct { - cfg *ctoml.Config - fp string - } - tests := []struct { - name string - args args - wantedContent string - }{ - { - name: "working example", - args: args{ - cfg: &ctoml.Config{}, - fp: filepath, - }, - wantedContent: `[core] -babe-authority = false -grandpa-authority = false -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ExportTomlConfig(tt.args.cfg, tt.args.fp) - - content, err := ioutil.ReadFile(tt.args.fp) - require.NoError(t, err) - require.Equal(t, tt.wantedContent, string(content)) - - }) - } -} - func TestNewTestConfig(t *testing.T) { basePath := t.TempDir() incBasePath := basePath[:len(basePath)-1] + "2" diff --git a/lib/utils/utils.go b/lib/utils/utils.go index 69f5454e50..8b1ab8440f 100644 --- a/lib/utils/utils.go +++ b/lib/utils/utils.go @@ -181,6 +181,11 @@ func GetDevGenesisPath(t *testing.T) string { return filepath.Join(GetProjectRootPathTest(t), "./chain/dev/genesis.json") } +// GetDevGenesisSpecPathTest gets the dev genesis spec path +func GetDevGenesisSpecPathTest(t *testing.T) string { + return filepath.Join(GetProjectRootPathTest(t), "./chain/dev/genesis-spec.json") +} + // GetGssmrGenesisPath gets the gssmr genesis path // and returns an error if it cannot find it. func GetGssmrGenesisPath() (path string, err error) { diff --git a/scripts/integration-test-all.sh b/scripts/integration-test-all.sh deleted file mode 100755 index 7c1cf9d81a..0000000000 --- a/scripts/integration-test-all.sh +++ /dev/null @@ -1,112 +0,0 @@ -#!/bin/bash - -# "stable" mode tests assume data is static -# "live" mode tests assume data dynamic - -SCRIPT=$(basename ${BASH_SOURCE[0]}) -TEST="" -QTD=1 -SLEEP_TIMEOUT=5 -TEST_QTD=3 - -PORT=7000 -RPC_PORT=8540 -HOSTNAME="0.0.0.0" -MODE="stable" - -declare -a keys=("alice" "bob" "charlie" "dave" "eve" "ferdie" "george" "heather" "ian") - -usage() { - echo "Usage: $SCRIPT" - echo "Optional command line arguments" - echo "-t -- Test to run. eg: rpc" - echo "-q -- Quantity of nodes to run. eg: 3" - echo "-z -- Quantity of nodes to run tests against eg: 3" - echo "-s -- Sleep between operations in secs. eg: 5" - exit 1 -} - -while getopts "h?t:q:z:s:" args; do -case $args in - h|\?) - usage; - exit;; - t ) TEST=${OPTARG};; - q ) QTD=${OPTARG};; - z ) TEST_QTD=${OPTARG};; - s ) SLEEP_TIMEOUT=${OPTARG};; - esac -done - -set -euxo pipefail - -BASE_PATH=$(mktemp -d -t gossamer-basepath.XXXXX) - -if [[ ! "$BASE_PATH" ]]; then - echo "Could not create $BASE_PATH" - exit 1 -fi - -# Compile gossamer -echo "compiling gossamer" -make build - -# PID array declaration -arr=() - -start_func() { - echo "starting gossamer node $i in background ..." - "$PWD"/bin/gossamer --port=$(($PORT + $i)) --key=${keys[$i-1]} --basepath="$BASE_PATH$i" \ - --rpc --rpchost=$HOSTNAME --rpcport=$(($RPC_PORT + $i)) --roles=1 --rpcmods=system,author,chain >"$BASE_PATH"/node"$i".log 2>&1 & disown - - GOSSAMER_PID=$! - echo "started gossamer node, pid=$GOSSAMER_PID" - # add PID to array - arr+=("$GOSSAMER_PID") -} - -# Run node with static blockchain database -# For loop N times -for i in $(seq 1 "$QTD"); do - start_func "$i" - echo "sleeping $SLEEP_TIMEOUT seconds for startup" - sleep "$SLEEP_TIMEOUT" - echo "done sleeping" -done - -echo "sleeping $SLEEP_TIMEOUT seconds before running tests ... 
" -sleep "$SLEEP_TIMEOUT" -echo "done sleeping" - -set +e - -if [[ -z $TEST || $TEST == "rpc" ]]; then - - for i in $(seq 1 "$TEST_QTD"); do - echo "going to test gossamer node $(($RPC_PORT + $i))..." - MODE=$MODE NETWORK_SIZE=$QTD HOSTNAME=$HOSTNAME PORT=$(($RPC_PORT + $i)) go test ./tests/rpc/... -timeout=60s -v -count=1 - - RPC_FAIL=$? - done - -fi - -stop_func() { - GOSSAMER_PID=$i - echo "shutting down gossamer node, pid=$GOSSAMER_PID ..." - - # Shutdown gossamer node - kill -9 "$GOSSAMER_PID" - wait "$GOSSAMER_PID" -} - - -for i in "${arr[@]}"; do - stop_func "$i" -done - -if [[ (-z $TEST || $TEST == "rpc") && $RPC_FAIL -ne 0 ]]; then - exit $RPC_FAIL -else - exit 0 -fi diff --git a/tests/polkadotjs_test/start_polkadotjs_test.go b/tests/polkadotjs_test/start_polkadotjs_test.go index 61714c2b6c..26e3bf775a 100644 --- a/tests/polkadotjs_test/start_polkadotjs_test.go +++ b/tests/polkadotjs_test/start_polkadotjs_test.go @@ -4,12 +4,15 @@ package polkadotjs_test import ( - "os" + "context" "os/exec" "strings" "testing" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,23 +24,38 @@ func TestStartGossamerAndPolkadotAPI(t *testing.T) { t.Log("Going to skip polkadot.js/api suite tests") return } - t.Log("starting gossamer for polkadot.js/api tests...") - utils.CreateDefaultConfig() - defer os.Remove(utils.ConfigDefault) + const nodePackageManager = "npm" + t.Logf("Checking %s is available...", nodePackageManager) + _, err := exec.LookPath(nodePackageManager) + if err != nil { + t.Fatalf("%s is not available: %s", nodePackageManager, err) + } - nodes, err := utils.InitializeAndStartNodesWebsocket(t, 1, utils.GenesisDev, utils.ConfigDefault) + t.Log("Installing Node dependencies...") + cmd := exec.Command(nodePackageManager, "install") + testWriter := utils.NewTestWriter(t) + cmd.Stdout = testWriter + cmd.Stderr = testWriter + err = cmd.Run() require.NoError(t, err) + t.Log("starting gossamer for polkadot.js/api tests...") + + tomlConfig := config.Default() + tomlConfig.Init.Genesis = libutils.GetDevGenesisSpecPathTest(t) + tomlConfig.Core.BABELead = true + tomlConfig.RPC.WS = true + n := node.New(t, tomlConfig) + + ctx, cancel := context.WithCancel(context.Background()) + n.InitAndStartTest(ctx, t, cancel) + command := "npx mocha ./test --timeout 30000" parts := strings.Fields(command) - data, err := exec.Command(parts[0], parts[1:]...).Output() + data, err := exec.CommandContext(ctx, parts[0], parts[1:]...).CombinedOutput() assert.NoError(t, err, string(data)) //uncomment this to see log results from javascript tests //fmt.Printf("%s\n", data) - - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) } diff --git a/tests/rpc/rpc_00_test.go b/tests/rpc/rpc_00_test.go index e1b9b462e0..6c4d1e30ff 100644 --- a/tests/rpc/rpc_00_test.go +++ b/tests/rpc/rpc_00_test.go @@ -6,54 +6,46 @@ package rpc import ( "context" "fmt" - "os" - "reflect" - "strconv" "testing" + "time" - "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/stretchr/testify/require" ) var ( - currentPort = strconv.Itoa(utils.BaseRPCPort) - rpcSuite = "rpc" + rpcSuite = "rpc" ) -func TestMain(m *testing.M) { - fmt.Println("Going to start RPC suite test") - - utils.CreateDefaultConfig() - defer 
os.Remove(utils.ConfigDefault) - - // Start all tests - code := m.Run() - os.Exit(code) -} - type testCase struct { description string method string params string expected interface{} - skip bool } -func getResponse(ctx context.Context, t *testing.T, test *testCase) interface{} { - if test.skip { - t.Skip("RPC endpoint not yet implemented") - return nil - } +func fetchWithTimeout(ctx context.Context, t *testing.T, + method, params string, target interface{}) { + t.Helper() - endpoint := utils.NewEndpoint(currentPort) - respBody, err := utils.PostRPC(ctx, endpoint, test.method, test.params) + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + defer getResponseCancel() + err := getResponse(getResponseCtx, method, params, target) require.NoError(t, err) +} - target := reflect.New(reflect.TypeOf(test.expected)).Interface() - err = utils.DecodeRPC(t, respBody, target) - require.Nil(t, err, "Could not DecodeRPC", string(respBody)) +func getResponse(ctx context.Context, method, params string, target interface{}) (err error) { + const currentPort = "8540" + endpoint := rpc.NewEndpoint(currentPort) + respBody, err := rpc.Post(ctx, endpoint, method, params) + if err != nil { + return fmt.Errorf("cannot RPC post: %w", err) + } - require.NotNil(t, target) + err = rpc.Decode(respBody, &target) + if err != nil { + return fmt.Errorf("cannot decode RPC response: %w", err) + } - return target + return nil } diff --git a/tests/rpc/rpc_01-system_test.go b/tests/rpc/rpc_01-system_test.go index bf43135990..6b386187c7 100644 --- a/tests/rpc/rpc_01-system_test.go +++ b/tests/rpc/rpc_01-system_test.go @@ -9,137 +9,190 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const peerIDRegex = `^[a-zA-Z0-9]{52}$` + func TestSystemRPC(t *testing.T) { if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test system_name", - method: "system_name", - skip: true, - }, - { //TODO - description: "test system_version", - method: "system_version", - skip: true, - }, - { //TODO - description: "test system_chain", - method: "system_chain", - skip: true, - }, - { //TODO - description: "test system_properties", - method: "system_properties", - skip: true, - }, - { - description: "test system_health", - method: "system_health", - expected: modules.SystemHealthResponse{ - Peers: 2, - IsSyncing: true, - ShouldHavePeers: true, - }, - params: "{}", - }, - { - description: "test system_peers", - method: "system_peers", - expected: modules.SystemPeersResponse{}, - params: "{}", - }, - { - description: "test system_network_state", - method: "system_networkState", - expected: modules.SystemNetworkStateResponse{ - NetworkState: modules.NetworkStateString{ - PeerID: "", - }, - }, - params: "{}", - }, - { //TODO - description: "test system_addReservedPeer", - method: "system_addReservedPeer", - skip: true, - }, - { //TODO - description: "test system_removeReservedPeer", - method: "system_removeReservedPeer", - skip: true, - }, - { //TODO - description: "test system_nodeRoles", - method: "system_nodeRoles", - skip: true, - }, - { //TODO - description: 
"test system_accountNextIndex", - method: "system_accountNextIndex", - skip: true, - }, - } - - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 3, utils.GenesisDefault, utils.ConfigDefault) - - //use only first server for tests - require.NoError(t, err) + const testTimeout = 8 * time.Minute + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) - time.Sleep(time.Second) // give server a second to start + const numberOfNodes = 3 - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - target := getResponse(getResponseCtx, t, test) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numberOfNodes, tomlConfig) - switch v := target.(type) { - case *modules.SystemHealthResponse: - t.Log("Will assert SystemHealthResponse", "target", target) + nodes.InitAndStartTest(ctx, t, cancel) - require.Equal(t, test.expected.(modules.SystemHealthResponse).IsSyncing, v.IsSyncing) - require.Equal(t, test.expected.(modules.SystemHealthResponse).ShouldHavePeers, v.ShouldHavePeers) - require.GreaterOrEqual(t, v.Peers, test.expected.(modules.SystemHealthResponse).Peers) + t.Run("system_health", func(t *testing.T) { + t.Parallel() - case *modules.SystemNetworkStateResponse: - t.Log("Will assert SystemNetworkStateResponse", "target", target) + const method = "system_health" + const params = "{}" - require.NotNil(t, v.NetworkState) - require.NotNil(t, v.NetworkState.PeerID) + expected := modules.SystemHealthResponse{ + Peers: numberOfNodes - 1, + ShouldHavePeers: true, + } - case *modules.SystemPeersResponse: - t.Log("Will assert SystemPeersResponse", "target", target) + var response modules.SystemHealthResponse + err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) { + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + err = getResponse(getResponseCtx, method, params, &response) + getResponseCancel() + if err != nil { + return false, err + } + return response.Peers == expected.Peers, nil + }) + require.NoError(t, err) + + // IsSyncing can be true or false + response.IsSyncing = false + + assert.Equal(t, expected, response) + }) + + t.Run("system_peers", func(t *testing.T) { + t.Parallel() + + // Wait for N-1 peers connected and no syncing + err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) { + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + const method = "system_health" + const params = "{}" + var healthResponse modules.SystemHealthResponse + err = getResponse(getResponseCtx, method, params, &healthResponse) + getResponseCancel() + if err != nil { + return false, err // error and stop retrying + } - require.NotNil(t, v) + ok = healthResponse.Peers == numberOfNodes-1 && !healthResponse.IsSyncing + return ok, nil + }) + require.NoError(t, err) + + var response modules.SystemPeersResponse + // Wait for N-1 peers with peer IDs set + err = retry.UntilOK(ctx, time.Second, func() (ok bool, err error) { + getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second) + const method = "system_peers" + const params = "{}" + err = getResponse(getResponseCtx, method, params, &response) + getResponseCancel() + if err != nil { + return false, err // error and stop retrying + } - //TODO: #807 - //this assertion requires more time on init to be 
enabled - //require.GreaterOrEqual(t, len(v.Peers), 2) + if len(response) != numberOfNodes-1 { + return false, nil // retry + } - for _, vv := range *v { - require.NotNil(t, vv.PeerID) - require.NotNil(t, vv.Roles) - require.NotNil(t, vv.BestHash) - require.NotNil(t, vv.BestNumber) + for _, peer := range response { + // wait for all peers to have the same best block number + if peer.PeerID == "" || peer.BestHash.IsEmpty() { + return false, nil // retry } - } + return true, nil // success, stop retrying }) - } - - t.Log("going to tear down gossamer...") - - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + require.NoError(t, err) + + expectedResponse := modules.SystemPeersResponse{ + // Assert they all have the same best block number and hash + {Roles: 4, PeerID: ""}, + {Roles: 4, PeerID: ""}, + } + for i := range response { + // Check randomly generated peer IDs and clear them + assert.Regexp(t, peerIDRegex, response[i].PeerID) + response[i].PeerID = "" + // TODO assert these are all the same, + // see https://github.com/ChainSafe/gossamer/issues/2498 + response[i].BestHash = common.Hash{} + response[i].BestNumber = 0 + } + + assert.Equal(t, expectedResponse, response) + }) + + t.Run("system_networkState", func(t *testing.T) { + t.Parallel() + + const method = "system_networkState" + const params = "{}" + + var response modules.SystemNetworkStateResponse + fetchWithTimeout(ctx, t, method, params, &response) + + assert.Regexp(t, peerIDRegex, response.NetworkState.PeerID) + response.NetworkState.PeerID = "" + + assert.NotEmpty(t, response.NetworkState.Multiaddrs) + for _, addr := range response.NetworkState.Multiaddrs { + assert.Regexp(t, "^/ip[4|6]/.+/tcp/[0-9]{1,5}/p2p/[a-zA-Z0-9]{52}$", addr) + } + response.NetworkState.Multiaddrs = nil + + // Ensure we don't need to assert other fields + expectedResponse := modules.SystemNetworkStateResponse{} + assert.Equal(t, expectedResponse, response) + }) + + t.Run("system_name", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_version", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_chain", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_properties", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_addReservedPeer", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_removeReservedPeer", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_nodeRoles", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) + + t.Run("system_accountNextIndex", func(t *testing.T) { + t.Parallel() + t.Skip("test not implemented") + }) } diff --git a/tests/rpc/rpc_02-author_test.go b/tests/rpc/rpc_02-author_test.go index 9db0aabddd..1716eef32e 100644 --- a/tests/rpc/rpc_02-author_test.go +++ b/tests/rpc/rpc_02-author_test.go @@ -12,7 +12,11 @@ import ( "github.com/centrifuge/go-substrate-rpc-client/v3/scale" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v3" "github.com/centrifuge/go-substrate-rpc-client/v3/signature" "github.com/centrifuge/go-substrate-rpc-client/v3/types" @@ -25,20 +29,27 @@ func TestAuthorSubmitExtrinsic(t 
*testing.T) { return } - t.Log("starting gossamer...") + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - time.Sleep(30 * time.Second) // wait for server to start and block 1 to be produced + api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", node.RPCPort())) + require.NoError(t, err) - api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", nodes[0].RPCPort)) + // Wait for the first block to be produced. + const retryWait = time.Second + err = retry.UntilOK(ctx, retryWait, func() (ok bool, err error) { + block, err := api.RPC.Chain.GetBlockLatest() + if err != nil { + return false, err + } + return block.Block.Header.Number > 0, nil + }) require.NoError(t, err) meta, err := api.RPC.State.GetMetadataLatest() @@ -94,60 +105,75 @@ func TestAuthorRPC(t *testing.T) { return } - testCases := []*testCase{ - { //TODO - description: "test author_submitExtrinsic", - method: "author_submitExtrinsic", - skip: true, - }, - { //TODO - description: "test author_pendingExtrinsics", - method: "author_pendingExtrinsics", - skip: true, - }, - { //TODO - description: "test author_removeExtrinsic", - method: "author_removeExtrinsic", - skip: true, - }, - { //TODO - description: "test author_insertKey", - method: "author_insertKey", - skip: true, - }, - { //TODO - description: "test author_rotateKeys", - method: "author_rotateKeys", - skip: true, - }, - { //TODO - description: "test author_hasSessionKeys", - method: "author_hasSessionKeys", - skip: true, - }, - { //TODO - description: "test author_hasKey", - method: "author_hasKey", - skip: true, - }, - } - - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) - - time.Sleep(time.Second) // give server a second to start - - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } - - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) + + t.Run("author_submitExtrinsic", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_submitExtrinsic", "", target) + }) + + t.Run("author_pendingExtrinsics", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_pendingExtrinsics", "", 
target) + }) + + t.Run("author_removeExtrinsic", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_removeExtrinsic", "", target) + }) + + t.Run("author_insertKey", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_insertKey", "", target) + }) + + t.Run("author_rotateKeys", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_rotateKeys", "", target) + }) + + t.Run("author_hasSessionKeys", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_hasSessionKeys", "", target) + }) + + t.Run("author_hasKey", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO + + var target interface{} // TODO + fetchWithTimeout(ctx, t, "author_hasKey", "", target) + }) } diff --git a/tests/rpc/rpc_03-chain_test.go b/tests/rpc/rpc_03-chain_test.go index 31111ccb5d..6f6be2e145 100644 --- a/tests/rpc/rpc_03-chain_test.go +++ b/tests/rpc/rpc_03-chain_test.go @@ -5,283 +5,356 @@ package rpc import ( "context" - "log" + "errors" + "fmt" + "math/rand" "testing" "time" "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/rpc/subscription" + "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const ( + regex32BytesHex = `^0x[0-9a-f]{64}$` + regexBytesHex = `^0x[0-9a-f]{2}[0-9a-f]*$` + regexBytesHexOrEmpty = `^0x[0-9a-f]*$` +) + func TestChainRPC(t *testing.T) { if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { - description: "test chain_getFinalizedHead", - method: "chain_getFinalizedHead", - expected: "", - params: "[]", - }, - { - description: "test chain_getHeader", - method: "chain_getHeader", - expected: modules.ChainBlockHeaderResponse{ - Number: "1", - }, - params: "[]", - }, - { - description: "test chain_getBlock", - method: "chain_getBlock", - expected: modules.ChainBlockResponse{ - Block: modules.ChainBlock{ - Header: modules.ChainBlockHeaderResponse{ - Number: "1", - }, - Body: []string{}, - }, + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) + + // Wait for Gossamer to produce block 2 + errBlockNumberTooHigh := errors.New("block number is too high") + const retryWaitDuration = 200 * time.Millisecond + err := retry.UntilOK(ctx, retryWaitDuration, func() (ok bool, err error) { + var header modules.ChainBlockHeaderResponse + fetchWithTimeout(ctx, t, "chain_getHeader", "[]", &header) + number, err := common.HexToUint(header.Number) + if err != nil { + return false, fmt.Errorf("cannot convert header number to uint: %w", err) + } + + switch number { + case 0, 1: + return false, nil + case 2: + return true, nil + default: + return false, fmt.Errorf("%w: %d", errBlockNumberTooHigh, 
number) + } + }) + require.NoError(t, err) + + var finalizedHead string + fetchWithTimeout(ctx, t, "chain_getFinalizedHead", "[]", &finalizedHead) + assert.Regexp(t, regex32BytesHex, finalizedHead) + + var header modules.ChainBlockHeaderResponse + fetchWithTimeout(ctx, t, "chain_getHeader", "[]", &header) + + // Check and clear unpredictable fields + assert.Regexp(t, regex32BytesHex, header.StateRoot) + header.StateRoot = "" + assert.Regexp(t, regex32BytesHex, header.ExtrinsicsRoot) + header.ExtrinsicsRoot = "" + assert.Len(t, header.Digest.Logs, 2) + for _, digestLog := range header.Digest.Logs { + assert.Regexp(t, regexBytesHex, digestLog) + } + header.Digest.Logs = nil + + // Assert remaining struct with predictable fields + expectedHeader := modules.ChainBlockHeaderResponse{ + ParentHash: finalizedHead, + Number: "0x02", + } + assert.Equal(t, expectedHeader, header) + + var block modules.ChainBlockResponse + fetchWithTimeout(ctx, t, "chain_getBlock", fmt.Sprintf(`["%s"]`, header.ParentHash), &block) + + // Check and clear unpredictable fields + assert.Regexp(t, regex32BytesHex, block.Block.Header.ParentHash) + block.Block.Header.ParentHash = "" + assert.Regexp(t, regex32BytesHex, block.Block.Header.StateRoot) + block.Block.Header.StateRoot = "" + assert.Regexp(t, regex32BytesHex, block.Block.Header.ExtrinsicsRoot) + block.Block.Header.ExtrinsicsRoot = "" + assert.Len(t, block.Block.Header.Digest.Logs, 3) + for _, digestLog := range block.Block.Header.Digest.Logs { + assert.Regexp(t, regexBytesHex, digestLog) + } + block.Block.Header.Digest.Logs = nil + assert.Len(t, block.Block.Body, 1) + const bodyRegex = `^0x280403000b[0-9a-z]{8}8101$` + assert.Regexp(t, bodyRegex, block.Block.Body[0]) + block.Block.Body = nil + + // Assert remaining struct with predictable fields + expectedBlock := modules.ChainBlockResponse{ + Block: modules.ChainBlock{ + Header: modules.ChainBlockHeaderResponse{ + Number: "0x01", }, - params: "[]", - }, - { - description: "test chain_getBlockHash", - method: "chain_getBlockHash", - expected: "", - params: "[]", }, } + assert.Equal(t, expectedBlock, block) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) + var blockHash string + fetchWithTimeout(ctx, t, "chain_getBlockHash", "[]", &blockHash) + assert.Regexp(t, regex32BytesHex, blockHash) + assert.NotEqual(t, finalizedHead, blockHash) +} - time.Sleep(time.Second * 5) // give server a few seconds to start +func TestChainSubscriptionRPC(t *testing.T) { + if utils.MODE != rpcSuite { + t.Log("Going to skip RPC suite tests") + return + } - chainBlockHeaderHash := "" - for _, test := range testCases { + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + tomlConfig.RPC.WS = true // WS port is set in the node.New constructor + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Run(test.description, func(t *testing.T) { + const endpoint = "ws://localhost:8546/" - // set params for chain_getBlock from previous chain_getHeader call - if chainBlockHeaderHash != "" { - test.params = "[\"" + chainBlockHeaderHash + "\"]" - } + t.Run("chain_subscribeNewHeads", func(t *testing.T) { + t.Parallel() - ctx := context.Background() + const numberOfMessages = 2 + messages := callAndSubscribeWebsocket(ctx, t, endpoint, "chain_subscribeNewHeads", "[]", 
numberOfMessages) - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - target := getResponse(getResponseCtx, t, test) + allParams := make([]subscription.Params, numberOfMessages) + for i, message := range messages { + err := rpc.Decode(message, &allParams[i]) + require.NoError(t, err, "cannot decode websocket message for message index %d", i) + } - switch v := target.(type) { - case *modules.ChainBlockHeaderResponse: - t.Log("Will assert ChainBlockHeaderResponse", "target", target) + for i, params := range allParams { + result := getResultMapFromParams(t, params) - require.GreaterOrEqual(t, test.expected.(modules.ChainBlockHeaderResponse).Number, v.Number) + number := getResultNumber(t, result) + assert.Equal(t, uint(i+1), number) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).ParentHash) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).StateRoot) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).ExtrinsicsRoot) - require.NotNil(t, test.expected.(modules.ChainBlockHeaderResponse).Digest) + assertResultRegex(t, result, "parentHash", regex32BytesHex) + assertResultRegex(t, result, "stateRoot", regex32BytesHex) + assertResultRegex(t, result, "extrinsicsRoot", regex32BytesHex) + assertResultDigest(t, result) - //save for chain_getBlock - chainBlockHeaderHash = v.ParentHash - case *modules.ChainBlockResponse: - t.Log("Will assert ChainBlockResponse", "value", v.Block) + remainingExpected := subscription.Params{ + Result: map[string]interface{}{}, + SubscriptionID: 1, + } + assert.Equal(t, remainingExpected, params) + } + }) - //reset - chainBlockHeaderHash = "" + t.Run("state_subscribeStorage", func(t *testing.T) { + t.Parallel() - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block) + const numberOfMessages = 2 + messages := callAndSubscribeWebsocket(ctx, t, endpoint, "state_subscribeStorage", "[]", numberOfMessages) - require.GreaterOrEqual(t, test.expected.(modules.ChainBlockResponse).Block.Header.Number, v.Block.Header.Number) + allParams := make([]subscription.Params, numberOfMessages) + for i := range allParams { + message := messages[i] + err := rpc.Decode(message, &allParams[i]) + require.NoError(t, err, "cannot decode websocket message for message index %d", i) + } + + for i, params := range allParams { + errorContext := fmt.Sprintf("for response at index %d", i) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.ParentHash) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.StateRoot) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.ExtrinsicsRoot) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Header.Digest) + result := getResultMapFromParams(t, params) - require.NotNil(t, test.expected.(modules.ChainBlockResponse).Block.Body) - require.GreaterOrEqual(t, len(test.expected.(modules.ChainBlockResponse).Block.Body), 0) + blockHex, ok := result["block"].(string) + require.True(t, ok, errorContext) + assert.Regexp(t, regex32BytesHex, blockHex, errorContext) + delete(result, "block") - case *string: - t.Log("Will assert ChainBlockNumberRequest", "value", *v) - require.NotNil(t, v) - require.GreaterOrEqual(t, len(*v), 66) + changes, ok := result["changes"].([]interface{}) + require.True(t, ok, errorContext) + for _, change := range changes { + fromTo, ok := change.([]interface{}) + require.Truef(t, ok, "%s and change: %v", errorContext, change) + from, ok := fromTo[0].(string) + require.Truef(t, 
ok, "%s and from: %v", errorContext, fromTo[0]) + to, ok := fromTo[1].(string) + require.Truef(t, ok, "%s and to: %v", errorContext, fromTo[1]) + assert.Regexp(t, regexBytesHexOrEmpty, from, errorContext) + assert.Regexp(t, regexBytesHexOrEmpty, to, errorContext) } + delete(result, "changes") - }) - } + remainingExpected := map[string]interface{}{} + assert.Equal(t, remainingExpected, result, errorContext) + } + }) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) -} + t.Run("chain_subscribeFinalizedHeads", func(t *testing.T) { + t.Parallel() -func TestChainSubscriptionRPC(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } + const numberOfMesages = 4 + messages := callAndSubscribeWebsocket(ctx, t, endpoint, "chain_subscribeFinalizedHeads", "[]", numberOfMesages) - testCases := []*testCase{ - { - description: "test chain_subscribeNewHeads", - method: "chain_subscribeNewHeads", - expected: []interface{}{1, - map[string](interface{}){ - "subscription": float64(1), - "result": map[string](interface{}){ - "number": "0x01", - "parentHash": "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21", - "stateRoot": "0x3b1a31d10d4d8a444579fd5a3fb17cbe6bebba9d939d88fe7bafb9d48036abb5", - "extrinsicsRoot": "0x8025c0d64df303f79647611c8c2b0a77bc2247ee12d851df4624e1f71ebb3aed", - //nolint:lll - "digest": map[string](interface{}){"logs": []interface{}{ - "0x0642414245c101c809062df1d1271d6a50232754baa64870515a7ada927886467748a220972c6d58347fd7317e286045604c5ddb78b84018c4b3a3836ee6626c8da6957338720053588d9f29c307fade658661d8d6a57c525f48553a253cf6e1475dbd319ca90200000000000000000e00000000000000", - "0x054241424501017cac567e5b5688260d9d0a1f7fe6a9f81ae0f1900a382e1c73a4929fcaf6e33ed9e7347eb81ebb2699d58f6c8b01c7bdf0714e5f6f4495bc4b5fb3becb287580"}}}}}, - params: "[]", - skip: false, - }, - { - description: "test state_subscribeStorage", - method: "state_subscribeStorage", - expected: "", - params: "[]", - skip: true, - }, - { - description: "test chain_finalizedHeads", - method: "chain_subscribeFinalizedHeads", - expected: []interface{}{1, - map[string](interface{}){ - "subscription": float64(1), - "result": map[string](interface{}){ - "number": "0x01", - "parentHash": "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21", - "stateRoot": "0x3b1a31d10d4d8a444579fd5a3fb17cbe6bebba9d939d88fe7bafb9d48036abb5", - "extrinsicsRoot": "0x8025c0d64df303f79647611c8c2b0a77bc2247ee12d851df4624e1f71ebb3aed", - //nolint:lll - "digest": map[string](interface{}){"logs": []interface{}{ - "0x0642414245c101c809062df1d1271d6a50232754baa64870515a7ada927886467748a220972c6d58347fd7317e286045604c5ddb78b84018c4b3a3836ee6626c8da6957338720053588d9f29c307fade658661d8d6a57c525f48553a253cf6e1475dbd319ca90200000000000000000e00000000000000", - "0x054241424501017cac567e5b5688260d9d0a1f7fe6a9f81ae0f1900a382e1c73a4929fcaf6e33ed9e7347eb81ebb2699d58f6c8b01c7bdf0714e5f6f4495bc4b5fb3becb287580"}}}}}, - params: "[]", - skip: false, - }, - } + allParams := make([]subscription.Params, numberOfMesages) + for i, message := range messages { + err := rpc.Decode(message, &allParams[i]) + require.NoError(t, err, "cannot decode websocket message for message index %d", i) + } - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodesWebsocket(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) + var blockNumbers []uint + for _, params := range allParams { + result := getResultMapFromParams(t, 
params) - time.Sleep(time.Second) // give server a second to start + number := getResultNumber(t, result) + blockNumbers = append(blockNumbers, number) - for _, test := range testCases { + assertResultRegex(t, result, "parentHash", regex32BytesHex) + assertResultRegex(t, result, "stateRoot", regex32BytesHex) + assertResultRegex(t, result, "extrinsicsRoot", regex32BytesHex) + assertResultDigest(t, result) - t.Run(test.description, func(t *testing.T) { - callWebsocket(t, test) - }) - } + remainingExpected := subscription.Params{ + Result: map[string]interface{}{}, + SubscriptionID: 1, + } + assert.Equal(t, remainingExpected, params) + } - time.Sleep(time.Second * 2) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // Check block numbers grow by zero or one in order of responses. + for i, blockNumber := range blockNumbers { + if i == 0 { + assert.Equal(t, uint(1), blockNumber) + continue + } + assert.GreaterOrEqual(t, blockNumber, blockNumbers[i-1]) + } + }) } -func callWebsocket(t *testing.T, test *testCase) { - if test.skip { - t.Skip("Websocket endpoint not yet implemented") - } - url := "ws://localhost:8546/" // todo don't hard code this - ws, _, err := websocket.DefaultDialer.Dial(url, nil) - require.NoError(t, err) - defer ws.Close() +func getResultMapFromParams(t *testing.T, params subscription.Params) ( + resultMap map[string]interface{}) { + t.Helper() - done := make(chan struct{}) + resultMap, ok := params.Result.(map[string]interface{}) + require.True(t, ok) - vals := make(chan []byte) - go wsListener(t, ws, vals, done, len(test.expected.([]interface{}))) + return resultMap +} - err = ws.WriteMessage(websocket.TextMessage, []byte(`{ - "jsonrpc": "2.0", - "method": "`+test.method+`", - "params": [`+test.params+`], - "id": 1 -}`)) +// getResultNumber returns the number value from the result map +// and deletes the "number" key from the map. +func getResultNumber(t *testing.T, result map[string]interface{}) uint { + t.Helper() + + hexNumber, ok := result["number"].(string) + require.True(t, ok) + + number, err := common.HexToUint(hexNumber) require.NoError(t, err) - resCount := 0 - for { - select { - case v := <-vals: - resCount++ - switch exp := test.expected.([]interface{})[resCount-1].(type) { - case int: - // check for result subscription number - resNum := 0 - err = utils.DecodeWebsocket(t, v, &resNum) - require.NoError(t, err) - - case map[string]interface{}: - // check result map response - resMap := make(map[string]interface{}) - err = utils.DecodeWebsocket(t, v, &resMap) - require.NoError(t, err) - - // check values in map are expected type - for eKey, eVal := range exp { - rVal := resMap[eKey] - require.NotNil(t, rVal) - require.IsType(t, eVal, rVal) - switch evt := eVal.(type) { - case map[string]interface{}: - checkMap(t, evt, rVal.(map[string]interface{})) - } - } - } + delete(result, "number") - case <-done: - return - } - } + return number } -func wsListener(t *testing.T, ws *websocket.Conn, val chan []byte, done chan struct{}, msgCount int) { - defer close(done) - count := 0 - for { - _, message, err := ws.ReadMessage() - require.NoError(t, err) - - count++ - log.Printf("recv: %v: %s\n", count, message) - - val <- message - if count == msgCount { - err := ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - require.NoError(t, err) - return - } +// assertResultRegex gets the value from the map and asserts that it matches the regex. 
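+// The match is checked with assert.Regexp, so a failed match marks the test as failed without stopping it.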
+// It then removes the key from the map. +func assertResultRegex(t *testing.T, result map[string]interface{}, key, regex string) { + t.Helper() + + value, ok := result[key] + require.True(t, ok, "cannot find key %q in result", key) + assert.Regexp(t, regex, value, "at result key %q", key) + delete(result, key) +} + +func assertResultDigest(t *testing.T, result map[string]interface{}) { + t.Helper() + + digest, ok := result["digest"].(map[string]interface{}) + require.True(t, ok) + + logs, ok := digest["logs"].([]interface{}) + require.True(t, ok) + + assert.NotEmpty(t, logs) + for _, log := range logs { + assert.Regexp(t, regexBytesHex, log) + } + + delete(result, "digest") } -func checkMap(t *testing.T, expMap map[string]interface{}, ckMap map[string]interface{}) { - for eKey, eVal := range expMap { - cVal := ckMap[eKey] +func callAndSubscribeWebsocket(ctx context.Context, t *testing.T, + endpoint, method, params string, numberOfMessages uint) ( + messages [][]byte) { + t.Helper() - require.NotNil(t, cVal) - require.IsType(t, eVal, cVal) - switch evt := eVal.(type) { - case map[string]interface{}: - checkMap(t, evt, cVal.(map[string]interface{})) - } + connection, _, err := websocket.DefaultDialer.Dial(endpoint, nil) + require.NoError(t, err, "cannot dial websocket") + defer connection.Close() // in case of failed required assertion + + const maxid = 100000 // otherwise it becomes a float64 + id := rand.Intn(maxid) + messageData := fmt.Sprintf(`{ + "jsonrpc": "2.0", + "method": %q, + "params": [%s], + "id": %d +}`, method, params, id) + err = connection.WriteMessage(websocket.TextMessage, []byte(messageData)) + require.NoError(t, err, "cannot write websocket message") + + // Read subscription id result + var target subscription.ResponseJSON + err = connection.ReadJSON(&target) + require.NoError(t, err, "cannot read websocket message") + assert.Equal(t, float64(id), target.ID, "request id mismatch") + assert.NotZero(t, target.Result, "subscription id is 0") + + for i := uint(0); i < numberOfMessages; i++ { + _, data, err := connection.ReadMessage() + require.NoError(t, err, "cannot read websocket message") + + messages = append(messages, data) } + // Close connection + const messageType = websocket.CloseMessage + data := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + err = connection.WriteMessage(messageType, data) + assert.NoError(t, err, "cannot write close websocket message") + err = connection.Close() + assert.NoError(t, err, "cannot close websocket connection") + + return messages } diff --git a/tests/rpc/rpc_04-offchain_test.go b/tests/rpc/rpc_04-offchain_test.go index b31dba704c..78ec0b4739 100644 --- a/tests/rpc/rpc_04-offchain_test.go +++ b/tests/rpc/rpc_04-offchain_test.go @@ -6,52 +6,56 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestOffchainRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test offchain_localStorageSet", - method: "offchain_localStorageSet", - skip: true, - }, - { //TODO - description: "test offchain_localStorageGet", - method: "offchain_localStorageGet", - 
skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + tomlConfig.Init.Genesis = genesisPath + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + t.Run("offchain_localStorageSet", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + fetchWithTimeout(ctx, t, "offchain_localStorageSet", "", &response) + + // TODO assert response + }) + + t.Run("offchain_localStorageGet", func(t *testing.T) { + t.Parallel() + + var response struct{} // TODO + + fetchWithTimeout(ctx, t, "offchain_localStorageGet", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + + // TODO assert response + }) } diff --git a/tests/rpc/rpc_05-state_test.go b/tests/rpc/rpc_05-state_test.go index 1d22a6a8e0..943c5b79f5 100644 --- a/tests/rpc/rpc_05-state_test.go +++ b/tests/rpc/rpc_05-state_test.go @@ -11,7 +11,11 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/stretchr/testify/require" ) @@ -21,115 +25,146 @@ func TestStateRPCResponseValidation(t *testing.T) { return } - t.Log("starting gossamer...") + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) + getBlockHashCtx, getBlockHashCancel := context.WithTimeout(ctx, time.Second) + blockHash, err := rpc.GetBlockHash(getBlockHashCtx, node.RPCPort(), "") + getBlockHashCancel() require.NoError(t, err) - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + t.Run("state_call", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + const params = `["", "","0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21"]` + var response modules.StateCallResponse - ctx := context.Background() + fetchWithTimeout(ctx, t, "state_call", params, &response) - getBlockHashCtx, cancel := context.WithTimeout(ctx, time.Second) - blockHash, err := utils.GetBlockHash(getBlockHashCtx, t, nodes[0].RPCPort, "") - cancel() - require.NoError(t, err) + // TODO assert stateCallResponse + }) - testCases := []*testCase{ - { - 
description: "Test state_call", - method: "state_call", - params: `["", "","0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21"]`, - expected: modules.StateCallResponse{}, - }, - { //TODO disable skip when implemented - description: "Test state_getKeysPaged", - method: "state_getKeysPaged", - skip: true, - }, - { - description: "Test state_queryStorage", - method: "state_queryStorage", - params: fmt.Sprintf( - `[["0xf2794c22e353e9a839f12faab03a911bf68967d635641a7087e53f2bff1ecad3c6756fee45ec79ead60347fffb770bcdf0ec74da701ab3d6495986fe1ecc3027"], "%s", null]`, //nolint:lll - blockHash), - expected: modules.StorageChangeSetResponse{ - Block: &blockHash, - Changes: [][]string{}, - }, - skip: true, - }, - { - description: "Test valid block hash state_getRuntimeVersion", - method: "state_getRuntimeVersion", - params: fmt.Sprintf(`["%s"]`, blockHash.String()), - expected: modules.StateRuntimeVersionResponse{}, - }, - { - description: "Test valid block hash state_getPairs", - method: "state_getPairs", - params: fmt.Sprintf(`["0x", "%s"]`, blockHash.String()), - expected: modules.StatePairResponse{}, - }, - { - description: "Test valid block hash state_getMetadata", - method: "state_getMetadata", - params: fmt.Sprintf(`["%s"]`, blockHash.String()), - expected: modules.StateMetadataResponse(""), - }, - { - description: "Test optional param state_getRuntimeVersion", - method: "state_getRuntimeVersion", - params: `[]`, - expected: modules.StateRuntimeVersionResponse{}, - }, - { - description: "Test optional params hash state_getPairs", - method: "state_getPairs", - params: `["0x"]`, - expected: modules.StatePairResponse{}, - }, - { - description: "Test optional param hash state_getMetadata", - method: "state_getMetadata", - params: `[]`, - expected: modules.StateMetadataResponse(""), - }, - { - description: "Test optional param value as null state_getRuntimeVersion", - method: "state_getRuntimeVersion", - params: `[null]`, - expected: modules.StateRuntimeVersionResponse{}, - }, - { - description: "Test optional param value as null state_getMetadata", - method: "state_getMetadata", - params: `[null]`, - expected: modules.StateMetadataResponse(""), - }, - { - description: "Test optional param value as null state_getPairs", - method: "state_getPairs", - params: `["0x", null]`, - expected: modules.StatePairResponse{}, - }, - } + t.Run("state_getKeysPaged", func(t *testing.T) { + t.Parallel() + t.SkipNow() - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + var response struct{} // TODO + fetchWithTimeout(ctx, t, "state_getKeysPaged", "", &response) + + // TODO assert response + }) + + t.Run("state_queryStorage", func(t *testing.T) { + t.Parallel() + t.SkipNow() // TODO disable skip + + params := fmt.Sprintf( + `[["0xf2794c22e353e9a839f12faab03a911bf68967d635641a7087e53f2bff1ecad3c6756fee45ec79ead60347fffb770bcdf0ec74da701ab3d6495986fe1ecc3027"], "%s", null]`, //nolint:lll + blockHash) + var response modules.StorageChangeSetResponse + + fetchWithTimeout(ctx, t, "state_queryStorage", params, &response) + + // TODO assert response + }) + t.Run("state_getRuntimeVersion", func(t *testing.T) { + t.Parallel() + + params := fmt.Sprintf(`[%q]`, blockHash) + var response modules.StateRuntimeVersionResponse + + fetchWithTimeout(ctx, t, "state_getRuntimeVersion", params, &response) + + // TODO assert 
response + }) + + t.Run("valid block hash state_getPairs", func(t *testing.T) { + t.Parallel() + + params := fmt.Sprintf(`["0x", "%s"]`, blockHash) + var response modules.StatePairResponse + + fetchWithTimeout(ctx, t, "state_getPairs", params, &response) + + // TODO assert response + }) + + t.Run("valid block hash state_getMetadata", func(t *testing.T) { + t.Parallel() + + params := fmt.Sprintf(`["%s"]`, blockHash) + var response modules.StateMetadataResponse + + fetchWithTimeout(ctx, t, "state_getMetadata", params, &response) + + // TODO assert response + }) + + t.Run("valid block hash state_getRuntimeVersion", func(t *testing.T) { + t.Parallel() + + var response modules.StateRuntimeVersionResponse + + fetchWithTimeout(ctx, t, "state_getRuntimeVersion", "[]", &response) + + // TODO assert response + }) + + t.Run("optional params hash state_getPairs", func(t *testing.T) { + t.Parallel() + + var response modules.StatePairResponse + + fetchWithTimeout(ctx, t, "state_getPairs", `["0x"]`, &response) + + // TODO assert response + }) + + t.Run("optional param hash state_getMetadata", func(t *testing.T) { + t.Parallel() + + var response modules.StateMetadataResponse + + fetchWithTimeout(ctx, t, "state_getMetadata", "[]", &response) + + // TODO assert response + }) + + t.Run("optional param value as null state_getRuntimeVersion", func(t *testing.T) { + t.Parallel() + + var response modules.StateRuntimeVersionResponse + + fetchWithTimeout(ctx, t, "state_getRuntimeVersion", "[null]", &response) + + // TODO assert response + }) + + t.Run("optional param value as null state_getMetadata", func(t *testing.T) { + t.Parallel() + + var response modules.StateMetadataResponse + + fetchWithTimeout(ctx, t, "state_getMetadata", "[null]", &response) + + // TODO assert response + }) + + t.Run("optional param value as null state_getPairs", func(t *testing.T) { + t.Parallel() + + var response modules.StatePairResponse + + fetchWithTimeout(ctx, t, "state_getPairs", `["0x", null]`, &response) + + // TODO assert response + }) } func TestStateRPCAPI(t *testing.T) { @@ -138,23 +173,19 @@ func TestStateRPCAPI(t *testing.T) { return } - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) time.Sleep(5 * time.Second) // Wait for block production - ctx := context.Background() - - getBlockHashCtx, cancel := context.WithTimeout(ctx, time.Second) - blockHash, err := utils.GetBlockHash(getBlockHashCtx, t, nodes[0].RPCPort, "") - cancel() + getBlockHashCtx, getBlockHashCancel := context.WithTimeout(ctx, time.Second) + blockHash, err := rpc.GetBlockHash(getBlockHashCtx, node.RPCPort(), "") + getBlockHashCancel() require.NoError(t, err) const ( @@ -331,10 +362,9 @@ func TestStateRPCAPI(t *testing.T) { // Cases for valid block hash in RPC params for _, test := range testCases { t.Run(test.description, func(t *testing.T) { - ctx := context.Background() postRPCCtx, cancel := context.WithTimeout(ctx, time.Second) - endpoint := utils.NewEndpoint(nodes[0].RPCPort) - respBody, err := utils.PostRPC(postRPCCtx, endpoint, 
test.method, test.params) + endpoint := rpc.NewEndpoint(node.RPCPort()) + respBody, err := rpc.Post(postRPCCtx, endpoint, test.method, test.params) cancel() require.NoError(t, err) @@ -349,15 +379,13 @@ func TestRPCStructParamUnmarshal(t *testing.T) { return } - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + tomlConfig.Init.Genesis = genesisPath + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) time.Sleep(2 * time.Second) // Wait for block production @@ -367,12 +395,10 @@ func TestRPCStructParamUnmarshal(t *testing.T) { params: `[["0xf2794c22e353e9a839f12faab03a911bf68967d635641a7087e53f2bff1ecad3c6756fee45ec79ead60347fffb770bcdf0ec74da701ab3d6495986fe1ecc3027"],"0xa32c60dee8647b07435ae7583eb35cee606209a595718562dd4a486a07b6de15", null]`, //nolint:lll } t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - - postRPCCtx, cancel := context.WithTimeout(ctx, time.Second) - endpoint := utils.NewEndpoint(nodes[0].RPCPort) - respBody, err := utils.PostRPC(postRPCCtx, endpoint, test.method, test.params) - cancel() + postRPCCtx, postRPCCancel := context.WithTimeout(ctx, time.Second) + endpoint := rpc.NewEndpoint(node.RPCPort()) + respBody, err := rpc.Post(postRPCCtx, endpoint, test.method, test.params) + postRPCCancel() require.NoError(t, err) require.NotContains(t, string(respBody), "json: cannot unmarshal") fmt.Println(string(respBody)) diff --git a/tests/rpc/rpc_06-engine_test.go b/tests/rpc/rpc_06-engine_test.go index 4f5a3020ab..53b9c5d2c8 100644 --- a/tests/rpc/rpc_06-engine_test.go +++ b/tests/rpc/rpc_06-engine_test.go @@ -6,47 +6,44 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestEngineRPC(t *testing.T) { + t.SkipNow() + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test engine_createBlock", - method: "engine_createBlock", - skip: true, - }, - { //TODO - description: "test engine_finalizeBlock", - method: "engine_finalizeBlock", - skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + t.Run("engine_createBlock", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO + fetchWithTimeout(ctx, t, "engine_createBlock", "", &response) - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - 
_ = getResponse(getResponseCtx, t, test) - }) - } + // TODO assert response + }) + + t.Run("engine_finalizeBlock", func(t *testing.T) { + t.Parallel() + + var response struct{} // TODO + fetchWithTimeout(ctx, t, "engine_finalizeBlock", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/rpc_07-payment_test.go b/tests/rpc/rpc_07-payment_test.go index 8639ff417d..f75d0b347c 100644 --- a/tests/rpc/rpc_07-payment_test.go +++ b/tests/rpc/rpc_07-payment_test.go @@ -6,42 +6,36 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestPaymentRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test payment_queryInfo", - method: "payment_queryInfo", - skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + t.Run("payment_queryInfo", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + fetchWithTimeout(ctx, t, "payment_queryInfo", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/rpc_08-contracts_test.go b/tests/rpc/rpc_08-contracts_test.go index 21b33d1283..0dd42ac095 100644 --- a/tests/rpc/rpc_08-contracts_test.go +++ b/tests/rpc/rpc_08-contracts_test.go @@ -6,47 +6,35 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestContractsRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test contracts_getStorage", - method: "contracts_getStorage", - skip: true, - }, - { //TODO - description: "test contracts_getStorage", - method: "contracts_getStorage", - skip: true, - }, - } - - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + 
node.InitAndStartTest(ctx, t, cancel) - time.Sleep(time.Second) // give server a second to start + t.Run("contracts_getStorage", func(t *testing.T) { + t.Parallel() - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + var response struct{} // TODO + fetchWithTimeout(ctx, t, "contracts_getStorage", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/rpc_09-babe_test.go b/tests/rpc/rpc_09-babe_test.go index 0f8318ffea..e97185c0ab 100644 --- a/tests/rpc/rpc_09-babe_test.go +++ b/tests/rpc/rpc_09-babe_test.go @@ -6,42 +6,36 @@ package rpc import ( "context" "testing" - "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" ) func TestBabeRPC(t *testing.T) { + t.SkipNow() // TODO + if utils.MODE != rpcSuite { t.Log("Going to skip RPC suite tests") return } - testCases := []*testCase{ - { //TODO - description: "test babe_epochAuthorship", - method: "babe_epochAuthorship", - skip: true, - }, - } + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.Core.BABELead = true + node := node.New(t, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + node.InitAndStartTest(ctx, t, cancel) - t.Log("starting gossamer...") - nodes, err := utils.InitializeAndStartNodes(t, 1, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + t.Run("babe_epochAuthorship", func(t *testing.T) { + t.Parallel() - time.Sleep(time.Second) // give server a second to start + var response struct{} // TODO - for _, test := range testCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() - getResponseCtx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - _ = getResponse(getResponseCtx, t, test) - }) - } + fetchWithTimeout(ctx, t, "babe_epochAuthorship", "", &response) - t.Log("going to tear down gossamer...") - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) + // TODO assert response + }) } diff --git a/tests/rpc/system_integration_test.go b/tests/rpc/system_integration_test.go index 809e172b28..83d4baff08 100644 --- a/tests/rpc/system_integration_test.go +++ b/tests/rpc/system_integration_test.go @@ -5,94 +5,113 @@ package rpc import ( "context" - "fmt" - "reflect" - "strconv" "testing" + "time" + "github.com/ChainSafe/gossamer/dot/config/toml" "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestStableNetworkRPC(t *testing.T) { - if utils.MODE != "stable" { - t.Skip("Integration tests are disabled, going to skip.") + if utils.MODE != "rpc" { + t.Skip("RPC tests are disabled, going to skip.") } - t.Log("Running NetworkAPI tests with HOSTNAME=" + utils.HOSTNAME + " and PORT=" + utils.PORT) - networkSize, err := strconv.Atoi(utils.NETWORK_SIZE) - if err != nil { - networkSize = 
0 - } - - testsCases := []*testCase{ - { - description: "test system_health", - method: "system_health", - expected: modules.SystemHealthResponse{ - Peers: networkSize - 1, - IsSyncing: true, - ShouldHavePeers: true, - }, - }, - { - description: "test system_network_state", - method: "system_networkState", - expected: modules.SystemNetworkStateResponse{ - NetworkState: modules.NetworkStateString{ - PeerID: "", - }, - }, + const numberOfNodes = 3 + config := toml.Config{ + RPC: toml.RPCConfig{ + Enabled: true, + Modules: []string{"system", "author", "chain"}, }, - { - description: "test system_peers", - method: "system_peers", - expected: modules.SystemPeersResponse{}, + Core: toml.CoreConfig{ + Roles: types.FullNodeRole, }, } - for _, test := range testsCases { - t.Run(test.description, func(t *testing.T) { - ctx := context.Background() + nodes := make(node.Nodes, numberOfNodes) + for i := range nodes { + nodes[i] = node.New(t, config, node.SetIndex(i)) + } + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + for _, node := range nodes { + node.InitAndStartTest(ctx, t, cancel) + const timeBetweenStart = 0 * time.Second + timer := time.NewTimer(timeBetweenStart) + select { + case <-timer.C: + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } - endpoint := fmt.Sprintf("http://%s:%s", utils.HOSTNAME, utils.PORT) - const params = "{}" - respBody, err := utils.PostRPC(ctx, endpoint, test.method, params) - require.NoError(t, err) + for _, node := range nodes { + node := node + t.Run(node.String(), func(t *testing.T) { + t.Parallel() + endpoint := rpc.NewEndpoint(node.RPCPort()) - target := reflect.New(reflect.TypeOf(test.expected)).Interface() - err = utils.DecodeRPC(t, respBody, target) - require.NoError(t, err) + t.Run("system_health", func(t *testing.T) { + t.Parallel() - switch v := target.(type) { - case *modules.SystemHealthResponse: - t.Log("Will assert SystemHealthResponse", "target", target) + var response modules.SystemHealthResponse - require.Equal(t, test.expected.(modules.SystemHealthResponse).IsSyncing, v.IsSyncing) - require.Equal(t, test.expected.(modules.SystemHealthResponse).ShouldHavePeers, v.ShouldHavePeers) - require.GreaterOrEqual(t, v.Peers, test.expected.(modules.SystemHealthResponse).Peers) + fetchWithTimeoutFromEndpoint(t, endpoint, "system_health", "{}", &response) - case *modules.SystemNetworkStateResponse: - t.Log("Will assert SystemNetworkStateResponse", "target", target) + expectedResponse := modules.SystemHealthResponse{ + Peers: numberOfNodes - 1, + IsSyncing: true, + ShouldHavePeers: true, + } + assert.Equal(t, expectedResponse, response) + }) - require.NotNil(t, v.NetworkState) - require.NotNil(t, v.NetworkState.PeerID) + t.Run("system_networkState", func(t *testing.T) { + t.Parallel() - case *modules.SystemPeersResponse: - t.Log("Will assert SystemPeersResponse", "target", target) + var response modules.SystemNetworkStateResponse - require.NotNil(t, *v) - require.GreaterOrEqual(t, len(*v), networkSize-2) + fetchWithTimeoutFromEndpoint(t, endpoint, "system_networkState", "{}", &response) - for _, vv := range *v { - require.NotNil(t, vv.PeerID) - require.NotNil(t, vv.Roles) - require.NotNil(t, vv.BestHash) - require.NotNil(t, vv.BestNumber) - } - } + // TODO assert response + }) + + t.Run("system_peers", func(t *testing.T) { + t.Parallel() + + var response modules.SystemPeersResponse + + fetchWithTimeoutFromEndpoint(t, endpoint, "system_peers", "{}", &response) + + assert.GreaterOrEqual(t, len(response), 
numberOfNodes-2) + + // TODO assert response + }) }) } } + +func fetchWithTimeoutFromEndpoint(t *testing.T, endpoint, method, + params string, target interface{}) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + body, err := rpc.Post(ctx, endpoint, method, params) + cancel() + require.NoError(t, err) + + err = rpc.Decode(body, target) + require.NoError(t, err) +} diff --git a/tests/stress/grandpa_test.go b/tests/stress/grandpa_test.go index a98935619b..ddd52585a8 100644 --- a/tests/stress/grandpa_test.go +++ b/tests/stress/grandpa_test.go @@ -5,31 +5,33 @@ package stress import ( "context" - "os" "testing" "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" - + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" "github.com/stretchr/testify/require" ) func TestStress_Grandpa_OneAuthority(t *testing.T) { - numNodes := 1 - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDev, utils.ConfigDefault) - require.NoError(t, err) + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + tomlConfig.Init.Genesis = genesisPath + n := node.New(t, tomlConfig) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + ctx, cancel := context.WithCancel(context.Background()) - time.Sleep(time.Second * 10) + n.InitAndStartTest(ctx, t, cancel) + nodes := node.Nodes{n} - ctx := context.Background() + time.Sleep(time.Second * 10) const getChainHeadTimeout = time.Second - compareChainHeadsWithRetry(ctx, t, nodes, getChainHeadTimeout) + compareChainHeadsWithRetry(ctx, nodes, getChainHeadTimeout) const getFinalizedHeadTimeout = time.Second prev, _ := compareFinalizedHeads(ctx, t, nodes, getFinalizedHeadTimeout) @@ -42,53 +44,51 @@ func TestStress_Grandpa_OneAuthority(t *testing.T) { func TestStress_Grandpa_ThreeAuthorities(t *testing.T) { t.Skip() - utils.GenerateGenesisThreeAuth() - defer os.Remove(utils.GenesisThreeAuths) + const numNodes = 3 + + genesisPath := utils.GenerateGenesisAuths(t, numNodes) - numNodes := 3 - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisThreeAuths, utils.ConfigDefault) - require.NoError(t, err) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + ctx, cancel := context.WithCancel(context.Background()) - ctx := context.Background() + nodes.InitAndStartTest(ctx, t, cancel) - numRounds := 5 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, - nodes, uint64(i), getFinalizedHeadByRoundTimeout) + const numRounds uint64 = 5 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } func TestStress_Grandpa_SixAuthorities(t *testing.T) { t.Skip() - utils.GenerateGenesisSixAuth(t) - defer os.Remove(utils.GenesisSixAuths) - - numNodes := 6 - nodes, err := 
utils.InitializeAndStartNodes(t, numNodes, utils.GenesisSixAuths, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() - - ctx := context.Background() - numRounds := 10 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, nodes, - uint64(i), getFinalizedHeadByRoundTimeout) + const numNodes = 6 + genesisPath := utils.GenerateGenesisAuths(t, numNodes) + + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) + + const numRounds uint64 = 10 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } @@ -97,27 +97,24 @@ func TestStress_Grandpa_NineAuthorities(t *testing.T) { t.Skip("skipping TestStress_Grandpa_NineAuthorities") } - utils.CreateConfigLogGrandpa() - defer os.Remove(utils.ConfigLogGrandpa) - - numNodes := 9 - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDefault, utils.ConfigLogGrandpa) - require.NoError(t, err) - - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() - - ctx := context.Background() - - numRounds := 3 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, nodes, - uint64(i), getFinalizedHeadByRoundTimeout) + const numNodes = 9 + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + + tomlConfig := config.LogGrandpa() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) + + const numRounds uint64 = 3 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } @@ -126,34 +123,29 @@ func TestStress_Grandpa_CatchUp(t *testing.T) { t.Skip("skipping TestStress_Grandpa_CatchUp") } - utils.GenerateGenesisSixAuth(t) - defer os.Remove(utils.GenesisSixAuths) - - numNodes := 6 - nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisSixAuths, utils.ConfigDefault) - require.NoError(t, err) + const numNodes = 6 + genesisPath := utils.GenerateGenesisAuths(t, numNodes) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) time.Sleep(time.Second * 70) // let some rounds run - node, err := utils.RunGossamer(t, numNodes-1, - utils.TestDir(t, utils.KeyList[numNodes-1]), - utils.GenesisSixAuths, utils.ConfigDefault, - false, false) - 
require.NoError(t, err) + node := node.New(t, tomlConfig, node.SetIndex(numNodes-1)) + node.InitAndStartTest(ctx, t, cancel) nodes = append(nodes, node) - ctx := context.Background() - - numRounds := 10 - for i := 1; i < numRounds+1; i++ { - const getFinalizedHeadByRoundTimeout = time.Second - fin, err := compareFinalizedHeadsWithRetry(ctx, t, nodes, uint64(i), getFinalizedHeadByRoundTimeout) + const numRounds uint64 = 10 + for round := uint64(1); round < numRounds+1; round++ { + const retryWait = time.Second + err := retry.UntilNoError(ctx, retryWait, func() (err error) { + const getFinalizedHeadByRoundTimeout = time.Second + _, err = compareFinalizedHeadsByRound(ctx, nodes, round, getFinalizedHeadByRoundTimeout) + return err + }) require.NoError(t, err) - t.Logf("finalised hash in round %d: %s", i, fin) } } diff --git a/tests/stress/helpers.go b/tests/stress/helpers.go index 1af4f77701..f47bbe2020 100644 --- a/tests/stress/helpers.go +++ b/tests/stress/helpers.go @@ -7,35 +7,39 @@ import ( "context" "errors" "fmt" + "strings" "testing" "time" "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/retry" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/stretchr/testify/require" ) var ( - maxRetries = 32 - testTimeout = time.Minute * 3 - logger = log.NewFromGlobal(log.AddContext("pkg", "tests/stress")) + logger = log.NewFromGlobal(log.AddContext("pkg", "tests/stress")) ) // compareChainHeads calls getChainHead for each node in the array // it returns a map of chainHead hashes to node key names, and an error if the hashes don't all match -func compareChainHeads(ctx context.Context, t *testing.T, nodes []utils.Node, +func compareChainHeads(ctx context.Context, nodes node.Nodes, getChainHeadTimeout time.Duration) (hashes map[common.Hash][]string, err error) { hashes = make(map[common.Hash][]string) for _, node := range nodes { getChainHeadCtx, cancel := context.WithTimeout(ctx, getChainHeadTimeout) - header := utils.GetChainHead(getChainHeadCtx, t, node.RPCPort) + header, err := rpc.GetChainHead(getChainHeadCtx, node.RPCPort()) cancel() + if err != nil { + return nil, fmt.Errorf("cannot get chain head for node %s: %w", node, err) + } - logger.Infof("got header with hash %s from node with key %s", header.Hash(), node.Key) - hashes[header.Hash()] = append(hashes[header.Hash()], node.Key) + logger.Infof("got header with hash %s from node %s", header.Hash(), node) + hashes[header.Hash()] = append(hashes[header.Hash()], node.Key()) } if len(hashes) != 1 { @@ -45,16 +49,14 @@ func compareChainHeads(ctx context.Context, t *testing.T, nodes []utils.Node, return hashes, err } -// compareChainHeadsWithRetry calls compareChainHeads, retrying up to maxRetries times if it errors. -func compareChainHeadsWithRetry(ctx context.Context, t *testing.T, nodes []utils.Node, +// compareChainHeadsWithRetry calls compareChainHeads, +// retrying until the context gets canceled. 
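The rounds loops in the tests above no longer count a fixed number of attempts; they hand the waiting to the new tests/utils/retry package, while compareChainHeadsWithRetry below keeps its own context-driven loop. The retry package body is not part of this patch, so the following is only a minimal sketch inferred from the call sites, assuming the signatures retry.UntilNoError(ctx, wait, fn) and retry.UntilOK(ctx, wait, fn):

    package retry

    import (
        "context"
        "fmt"
        "time"
    )

    // UntilNoError retries fn until it returns a nil error, waiting
    // retryWait between failed attempts, and gives up with the last
    // error once ctx is canceled.
    func UntilNoError(ctx context.Context, retryWait time.Duration,
        fn func() (err error)) (err error) {
        for ctx.Err() == nil {
            err = fn()
            if err == nil {
                return nil
            }
            waitAfterFail(ctx, retryWait)
        }
        if err == nil {
            return ctx.Err()
        }
        return fmt.Errorf("%w; last error: %s", ctx.Err(), err)
    }

    // UntilOK retries fn until it reports ok, until fn returns a
    // non-nil error to signal a hard failure, or until ctx is canceled.
    func UntilOK(ctx context.Context, retryWait time.Duration,
        fn func() (ok bool, err error)) error {
        for ctx.Err() == nil {
            ok, err := fn()
            if ok {
                return nil
            } else if err != nil {
                return err
            }
            waitAfterFail(ctx, retryWait)
        }
        return ctx.Err()
    }

    // waitAfterFail waits for retryWait to elapse or for ctx to be
    // canceled, whichever comes first, without leaking the timer.
    func waitAfterFail(ctx context.Context, retryWait time.Duration) {
        timer := time.NewTimer(retryWait)
        select {
        case <-timer.C:
        case <-ctx.Done():
            if !timer.Stop() {
                <-timer.C
            }
        }
    }

Under this contract, UntilOK's callback distinguishes a soft failure (return false, nil and try again) from a hard one (return a non-nil error and stop immediately), which is exactly how compareBlocksByNumber further below treats the "block number too high" RPC error versus any other error.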
+func compareChainHeadsWithRetry(ctx context.Context, nodes node.Nodes, getChainHeadTimeout time.Duration) error { - var hashes map[common.Hash][]string - var err error - - for i := 0; i < maxRetries; i++ { - hashes, err = compareChainHeads(ctx, t, nodes, getChainHeadTimeout) + for { + hashes, err := compareChainHeads(ctx, nodes, getChainHeadTimeout) if err == nil { - break + return nil } timer := time.NewTimer(time.Second) @@ -64,87 +66,60 @@ func compareChainHeadsWithRetry(ctx context.Context, t *testing.T, nodes []utils if !timer.Stop() { <-timer.C } - return err // last error + return fmt.Errorf("%w: hashes=%v", err, hashes) // last error } } - - if err != nil { - err = fmt.Errorf("%w: hashes=%v", err, hashes) - } - - return err } +var errBlockHashNotOne = errors.New("expected 1 block hash") + // compareBlocksByNumber calls getBlockByNumber for each node in the array // it returns a map of block hashes to node key names, and an error if the hashes don't all match -func compareBlocksByNumber(ctx context.Context, t *testing.T, nodes []utils.Node, - num string) (hashToKeys map[common.Hash][]string) { - type resultContainer struct { - hash common.Hash - nodeKey string - err error - } - results := make(chan resultContainer) - - for _, node := range nodes { - go func(node utils.Node) { - result := resultContainer{ - nodeKey: node.Key, - } - - for { // retry until context gets canceled - result.hash, result.err = utils.GetBlockHash(ctx, t, node.RPCPort, num) - - if err := ctx.Err(); err != nil { - result.err = err - break - } - - if result.err == nil { - break +func compareBlocksByNumber(ctx context.Context, nodes node.Nodes, + num string) (nodeKeys []string, err error) { + blockHashes := make(map[common.Hash]struct{}, 1) + for _, n := range nodes { + const retryWait = time.Second + err := retry.UntilOK(ctx, retryWait, func() (ok bool, err error) { + hash, err := rpc.GetBlockHash(ctx, n.RPCPort(), num) + if err != nil { + const blockDoesNotExistString = "cannot find node with number greater than highest in blocktree" + if strings.Contains(err.Error(), blockDoesNotExistString) { + return false, nil // retry after retryWait has elapsed. 
} + return false, err // stop retrying } - results <- result - }(node) - } - - var err error - hashToKeys = make(map[common.Hash][]string, len(nodes)) - for range nodes { - result := <-results + blockHashes[hash] = struct{}{} + nodeKeys = append(nodeKeys, n.Key()) + return true, nil + }) if err != nil { - continue // one failed, we don't care anymore - } - - if result.err != nil { - err = result.err - continue + return nil, fmt.Errorf("for node %s and block number %s: %w", n, num, err) } - - hashToKeys[result.hash] = append(hashToKeys[result.hash], result.nodeKey) } - require.NoError(t, err) - require.Lenf(t, hashToKeys, 1, - "expected 1 block found for number %s but got %d block(s)", - num, len(hashToKeys)) + if len(blockHashes) != 1 { + return nil, fmt.Errorf("%w: but got %d block hashes for block number %s", + errBlockHashNotOne, len(blockHashes), num) + } - return hashToKeys + return nodeKeys, nil } // compareFinalizedHeads calls getFinalizedHeadByRound for each node in the array // it returns a map of finalisedHead hashes to node key names, and an error if the hashes don't all match -func compareFinalizedHeads(ctx context.Context, t *testing.T, nodes []utils.Node, +func compareFinalizedHeads(ctx context.Context, t *testing.T, nodes node.Nodes, getFinalizedHeadTimeout time.Duration) (hashes map[common.Hash][]string, err error) { hashes = make(map[common.Hash][]string) for _, node := range nodes { getFinalizedHeadCtx, cancel := context.WithTimeout(ctx, getFinalizedHeadTimeout) - hash := utils.GetFinalizedHead(getFinalizedHeadCtx, t, node.RPCPort) + hash, err := rpc.GetFinalizedHead(getFinalizedHeadCtx, node.RPCPort()) cancel() + require.NoError(t, err) - logger.Infof("got finalised head with hash %s from node with key %s", hash, node.Key) - hashes[hash] = append(hashes[hash], node.Key) + logger.Infof("got finalised head with hash %s from node %s", hash, node) + hashes[hash] = append(hashes[hash], node.Key()) } if len(hashes) == 0 { @@ -160,21 +135,21 @@ func compareFinalizedHeads(ctx context.Context, t *testing.T, nodes []utils.Node // compareFinalizedHeadsByRound calls getFinalizedHeadByRound for each node in the array // it returns a map of finalisedHead hashes to node key names, and an error if the hashes don't all match -func compareFinalizedHeadsByRound(ctx context.Context, t *testing.T, nodes []utils.Node, +func compareFinalizedHeadsByRound(ctx context.Context, nodes node.Nodes, round uint64, getFinalizedHeadByRoundTimeout time.Duration) ( hashes map[common.Hash][]string, err error) { hashes = make(map[common.Hash][]string) for _, node := range nodes { getFinalizedHeadByRoundCtx, cancel := context.WithTimeout(ctx, getFinalizedHeadByRoundTimeout) - hash, err := utils.GetFinalizedHeadByRound(getFinalizedHeadByRoundCtx, t, node.RPCPort, round) + hash, err := rpc.GetFinalizedHeadByRound(getFinalizedHeadByRoundCtx, node.RPCPort(), round) cancel() if err != nil { - return nil, err + return nil, fmt.Errorf("cannot get finalized head for round %d: %w", round, err) } - logger.Infof("got finalised head with hash %s from node with key %s at round %d", hash, node.Key, round) - hashes[hash] = append(hashes[hash], node.Key) + logger.Infof("got finalised head with hash %s from node %s at round %d", hash, node, round) + hashes[hash] = append(hashes[hash], node.Key()) } if len(hashes) == 0 { @@ -188,47 +163,15 @@ func compareFinalizedHeadsByRound(ctx context.Context, t *testing.T, nodes []uti return hashes, err } -// compareFinalizedHeadsWithRetry calls compareFinalizedHeadsByRound, retrying up to 
maxRetries times if it errors. -// it returns the finalised hash if it succeeds -func compareFinalizedHeadsWithRetry(ctx context.Context, t *testing.T, - nodes []utils.Node, round uint64, - getFinalizedHeadByRoundTimeout time.Duration) ( - hash common.Hash, err error) { - var hashes map[common.Hash][]string - - for i := 0; i < maxRetries; i++ { - hashes, err = compareFinalizedHeadsByRound(ctx, t, nodes, round, getFinalizedHeadByRoundTimeout) - if err == nil { - break - } - - if errors.Is(err, errFinalizedBlockMismatch) { - return common.Hash{}, fmt.Errorf("%w: round=%d hashes=%v", err, round, hashes) - } - - time.Sleep(3 * time.Second) - } - - if err != nil { - return common.Hash{}, fmt.Errorf("%w: round=%d hashes=%v", err, round, hashes) - } - - for h := range hashes { - return h, nil - } - - return common.Hash{}, nil -} - -func getPendingExtrinsics(ctx context.Context, t *testing.T, node utils.Node) []string { - endpoint := utils.NewEndpoint(node.RPCPort) - method := utils.AuthorPendingExtrinsics +func getPendingExtrinsics(ctx context.Context, t *testing.T, node node.Node) []string { + endpoint := rpc.NewEndpoint(node.RPCPort()) + const method = "author_pendingExtrinsics" const params = "[]" - respBody, err := utils.PostRPC(ctx, endpoint, method, params) + respBody, err := rpc.Post(ctx, endpoint, method, params) require.NoError(t, err) exts := new(modules.PendingExtrinsicsResponse) - err = utils.DecodeRPC(t, respBody, exts) + err = rpc.Decode(respBody, exts) require.NoError(t, err) return *exts diff --git a/tests/stress/network_test.go b/tests/stress/network_test.go index 3f1302b446..5398aa6147 100644 --- a/tests/stress/network_test.go +++ b/tests/stress/network_test.go @@ -8,7 +8,11 @@ import ( "testing" "time" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/ChainSafe/gossamer/internal/log" "github.com/stretchr/testify/require" @@ -16,25 +20,24 @@ import ( func TestNetwork_MaxPeers(t *testing.T) { numNodes := 9 // 9 block producers + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) utils.Logger.Patch(log.SetLevel(log.Info)) - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) - - defer func() { - errList := utils.TearDown(t, nodes) - require.Len(t, errList, 0) - }() + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) // wait for nodes to connect time.Sleep(time.Second * 10) - ctx := context.Background() - for i, node := range nodes { const getPeersTimeout = time.Second - getPeersCtx, cancel := context.WithTimeout(ctx, getPeersTimeout) - peers := utils.GetPeers(getPeersCtx, t, node.RPCPort) - cancel() + getPeersCtx, getPeersCancel := context.WithTimeout(ctx, getPeersTimeout) + peers, err := rpc.GetPeers(getPeersCtx, node.RPCPort()) + getPeersCancel() + + require.NoError(t, err) t.Logf("node %d: peer count=%d", i, len(peers)) require.LessOrEqual(t, len(peers), 5) diff --git a/tests/stress/stress_test.go b/tests/stress/stress_test.go index 869937dadc..9b0fe52abd 100644 --- a/tests/stress/stress_test.go +++ b/tests/stress/stress_test.go @@ -9,7 +9,7 @@ import ( "math/big" "math/rand" "os" - "strconv" + "path/filepath" "strings" 
"testing" "time" @@ -24,7 +24,11 @@ import ( gosstypes "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" + libutils "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" ) func TestMain(m *testing.M) { @@ -33,22 +37,6 @@ func TestMain(m *testing.M) { return } - if utils.HOSTNAME == "" { - utils.HOSTNAME = "localhost" - } - - utils.CreateConfigNoBabe() - utils.CreateDefaultConfig() - utils.CreateConfigNoGrandpa() - utils.CreateConfigNotAuthority() - - defer func() { - os.Remove(utils.ConfigNoBABE) - os.Remove(utils.ConfigDefault) - os.Remove(utils.ConfigNoGrandpa) - os.Remove(utils.ConfigNotAuthority) - }() - logLvl := log.Info if utils.LOGLEVEL != "" { var err error @@ -61,127 +49,130 @@ func TestMain(m *testing.M) { utils.Logger.Patch(log.SetLevel(logLvl)) logger.Patch(log.SetLevel(logLvl)) - utils.GenerateGenesisThreeAuth() - // Start all tests code := m.Run() os.Exit(code) } func TestRestartNode(t *testing.T) { - numNodes := 1 - nodes, err := utils.InitNodes(numNodes, utils.ConfigDefault) - require.NoError(t, err) + const numNodes = 1 + defaultConfig := config.Default() + nodes := node.MakeNodes(t, numNodes, defaultConfig) - err = utils.StartNodes(t, nodes) + err := nodes.Init(context.Background()) require.NoError(t, err) - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) + ctx, cancel := context.WithCancel(context.Background()) - err = utils.StartNodes(t, nodes) - require.NoError(t, err) + runtimeErrors, startErr := nodes.Start(ctx) + if startErr != nil { + stopNodes(cancel, runtimeErrors) + t.Fatalf("failed to start nodes: %s", startErr) + } - errList = utils.StopNodes(t, nodes) - require.Len(t, errList, 0) -} + stopNodes(cancel, runtimeErrors) -func TestSync_SingleBlockProducer(t *testing.T) { - numNodes := 4 - utils.Logger.Patch(log.SetLevel(log.Info)) + ctx, cancel = context.WithCancel(context.Background()) - // start block producing node first - node, err := utils.RunGossamer(t, numNodes-1, - utils.TestDir(t, utils.KeyList[numNodes-1]), - utils.GenesisDev, utils.ConfigNoGrandpa, - false, true) - require.NoError(t, err) + runtimeErrors, startErr = nodes.Start(ctx) + if startErr != nil { + stopNodes(cancel, runtimeErrors) + t.Fatalf("failed to start nodes: %s", startErr) + } - // wait and start rest of nodes - if they all start at the same time the first round usually doesn't complete since - // all nodes vote for different blocks. 
- time.Sleep(time.Second * 15) - nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisDev, utils.ConfigNotAuthority) - require.NoError(t, err) - nodes = append(nodes, node) + stopNodes(cancel, runtimeErrors) +} - time.Sleep(time.Second * 30) +func stopNodes(cancel context.CancelFunc, runtimeErrors []<-chan error) { + cancel() + for _, runtimeError := range runtimeErrors { + <-runtimeError + } +} - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() +func TestSync_SingleBlockProducer(t *testing.T) { + const numNodes = 4 + genesisPath := libutils.GetDevGenesisSpecPathTest(t) - numCmps := 10 - ctx := context.Background() + configNoGrandpa := config.NoGrandpa() + configNoGrandpa.Init.Genesis = genesisPath + configNoGrandpa.Core.BABELead = true + babeLeadNode := node.New(t, configNoGrandpa, node.SetIndex(numNodes-1)) - for i := 0; i < numCmps; i++ { - time.Sleep(3 * time.Second) - t.Log("comparing...", i) + configNoAuthority := config.NotAuthority() + configNoAuthority.Init.Genesis = genesisPath + noAuthorityNodes := node.MakeNodes(t, numNodes-1, configNoAuthority) - const comparisonTimeout = 5 * time.Second - compareCtx, cancel := context.WithTimeout(ctx, comparisonTimeout) + nodes := make(node.Nodes, 0, numNodes) + nodes = append(nodes, babeLeadNode) + nodes = append(nodes, noAuthorityNodes...) - hashes := compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(i)) + const testTimeout = 20 * time.Minute + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) - cancel() + nodes.InitAndStartTest(ctx, t, cancel) - // there will only be one key in the mapping - for _, nodesWithHash := range hashes { - // allow 1 node to potentially not have synced. this is due to the need to increase max peer count - require.GreaterOrEqual(t, len(nodesWithHash), numNodes-1) - } + const blockNumbers = 10 + for blockNumber := 0; blockNumber < blockNumbers; blockNumber++ { + t.Logf("comparing block number %d...", blockNumber) + + nodeKeys, err := compareBlocksByNumber(ctx, nodes, fmt.Sprint(blockNumber)) + require.NoError(t, err) + require.Equal(t, len(nodeKeys), numNodes) } } func TestSync_Basic(t *testing.T) { - nodes, err := utils.InitializeAndStartNodes(t, 3, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + config := config.Default() + config.Init.Genesis = genesisPath + const numNodes = 3 + nodes := node.MakeNodes(t, numNodes, config) + + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) - ctx := context.Background() const getChainHeadTimeout = time.Second - err = compareChainHeadsWithRetry(ctx, t, nodes, getChainHeadTimeout) + err := compareChainHeadsWithRetry(ctx, nodes, getChainHeadTimeout) require.NoError(t, err) } func TestSync_MultipleEpoch(t *testing.T) { t.Skip("skipping TestSync_MultipleEpoch") + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) numNodes := 3 utils.Logger.Patch(log.SetLevel(log.Info)) // wait and start rest of nodes - if they all start at the same time the first round usually doesn't complete since - nodes, err := utils.InitializeAndStartNodes(t, numNodes, utils.GenesisDefault, utils.ConfigDefault) - require.NoError(t, err) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes, tomlConfig) - defer func() { - errList := utils.StopNodes(t, 
nodes) - require.Len(t, errList, 0) - }() + ctx, cancel := context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) time.Sleep(time.Second * 10) - ctx := context.Background() - slotDurationCtx, cancel := context.WithTimeout(ctx, time.Second) - slotDuration := utils.SlotDuration(slotDurationCtx, t, nodes[0].RPCPort) + slotDuration, err := rpc.SlotDuration(slotDurationCtx, nodes[0].RPCPort()) cancel() + require.NoError(t, err) epochLengthCtx, cancel := context.WithTimeout(ctx, time.Second) - epochLength := utils.EpochLength(epochLengthCtx, t, nodes[0].RPCPort) + epochLength, err := rpc.EpochLength(epochLengthCtx, nodes[0].RPCPort()) cancel() + require.NoError(t, err) // Wait for epoch to pass time.Sleep(time.Duration(uint64(slotDuration.Nanoseconds()) * epochLength)) // Just checking that everythings operating as expected getChainHeadCtx, cancel := context.WithTimeout(ctx, time.Second) - header := utils.GetChainHead(getChainHeadCtx, t, nodes[0].RPCPort) + header, err := rpc.GetChainHead(getChainHeadCtx, nodes[0].RPCPort()) cancel() + require.NoError(t, err) currentHeight := header.Number for i := uint(0); i < currentHeight; i++ { @@ -190,7 +181,8 @@ func TestSync_MultipleEpoch(t *testing.T) { const compareTimeout = 5 * time.Second compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(int(i))) + _, err := compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(i)) + require.NoError(t, err) cancel() } @@ -201,26 +193,27 @@ func TestSync_SingleSyncingNode(t *testing.T) { t.Skip("skipping TestSync_SingleSyncingNode") utils.Logger.Patch(log.SetLevel(log.Info)) + ctx, cancel := context.WithCancel(context.Background()) + // start block producing node - alice, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[0]), utils.GenesisDev, - utils.ConfigDefault, false, true) - require.NoError(t, err) + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + blockProducingConfig := config.Default() + blockProducingConfig.Init.Genesis = genesisPath + blockProducingConfig.Core.BABELead = true + alice := node.New(t, blockProducingConfig, node.SetIndex(0)) + + alice.InitAndStartTest(ctx, t, cancel) + time.Sleep(time.Second * 15) // start syncing node - bob, err := utils.RunGossamer(t, 1, - utils.TestDir(t, utils.KeyList[1]), utils.GenesisDev, - utils.ConfigNoBABE, false, false) - require.NoError(t, err) + syncingNodeConfig := config.NoBabe() + syncingNodeConfig.Init.Genesis = genesisPath + bob := node.New(t, syncingNodeConfig, node.SetIndex(1)) - nodes := []utils.Node{alice, bob} - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + bob.InitAndStartTest(ctx, t, cancel) - ctx := context.Background() + nodes := node.Nodes{alice, bob} numCmps := 100 for i := 0; i < numCmps; i++ { @@ -229,7 +222,8 @@ func TestSync_SingleSyncingNode(t *testing.T) { const compareTimeout = 5 * time.Second compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(i)) + _, err := compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(i)) + require.NoError(t, err) cancel() } @@ -240,18 +234,20 @@ func TestSync_Bench(t *testing.T) { const numBlocks uint = 64 // start block producing node - alice, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[1]), - utils.GenesisDev, utils.ConfigNoGrandpa, - false, true) - require.NoError(t, err) + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + configNoGrandpa := 
config.NoGrandpa() + configNoGrandpa.Init.Genesis = genesisPath + configNoGrandpa.Core.BABELead = true - ctx := context.Background() + alice := node.New(t, configNoGrandpa, node.SetIndex(0)) + + ctx, cancel := context.WithCancel(context.Background()) + alice.InitAndStartTest(ctx, t, cancel) for { - getChainHeadCtx, cancel := context.WithTimeout(ctx, time.Second) - header, err := utils.GetChainHeadWithError(getChainHeadCtx, t, alice.RPCPort) - cancel() + getChainHeadCtx, getChainCancel := context.WithTimeout(ctx, time.Second) + header, err := rpc.GetChainHead(getChainHeadCtx, alice.RPCPort()) + getChainCancel() if err != nil { continue } @@ -263,47 +259,46 @@ func TestSync_Bench(t *testing.T) { time.Sleep(3 * time.Second) } - pauseBabeCtx, cancel := context.WithTimeout(ctx, time.Second) - err = utils.PauseBABE(pauseBabeCtx, alice.RPCPort) - cancel() + pauseBabeCtx, pauseBabeCancel := context.WithTimeout(ctx, time.Second) + err := rpc.PauseBABE(pauseBabeCtx, alice.RPCPort()) + pauseBabeCancel() require.NoError(t, err) t.Log("BABE paused") // start syncing node - bob, err := utils.RunGossamer(t, 1, - utils.TestDir(t, utils.KeyList[0]), utils.GenesisDev, - utils.ConfigNotAuthority, false, true) + configNoAuthority := config.NotAuthority() + configNoAuthority.Init.Genesis = genesisPath + configNoAuthority.Core.BABELead = true + bob := node.New(t, configNoAuthority, node.SetIndex(1)) + + bob.InitAndStartTest(ctx, t, cancel) require.NoError(t, err) - nodes := []utils.Node{alice, bob} - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + nodes := node.Nodes{alice, bob} // see how long it takes to sync to block numBlocks last := numBlocks start := time.Now() var end time.Time + const retryWait = time.Second + const syncWaitTimeout = 3 * time.Minute + syncWaitCtx, syncWaitCancel := context.WithTimeout(ctx, syncWaitTimeout) for { - if time.Since(start) >= testTimeout { - t.Fatal("did not sync") - } - - getChainHeadCtx, getChainHeadCancel := context.WithTimeout(ctx, time.Second) - head, err := utils.GetChainHeadWithError(getChainHeadCtx, t, bob.RPCPort) + getChainHeadCtx, getChainHeadCancel := context.WithTimeout(syncWaitCtx, time.Second) + head, err := rpc.GetChainHead(getChainHeadCtx, bob.RPCPort()) getChainHeadCancel() - if err != nil { - continue - } - - if head.Number >= last { + if err == nil && head.Number >= last { end = time.Now() + syncWaitCancel() break } + + retryWaitCtx, retryWaitCancel := context.WithTimeout(syncWaitCtx, retryWait) + <-retryWaitCtx.Done() + retryWaitCancel() } maxTime := time.Second * 85 @@ -319,11 +314,12 @@ func TestSync_Bench(t *testing.T) { t.Log("comparing block...", numBlocks) const compareTimeout = 5 * time.Second - compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) + compareCtx, pauseBabeCancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, fmt.Sprint(numBlocks)) + _, err = compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(numBlocks)) + require.NoError(t, err) - cancel() + pauseBabeCancel() time.Sleep(time.Second * 3) } @@ -334,98 +330,135 @@ func TestSync_Restart(t *testing.T) { numNodes := 3 utils.Logger.Patch(log.SetLevel(log.Info)) + mainCtx, mainCancel := context.WithCancel(context.Background()) + + nodeCtxs := make([]context.Context, numNodes) + nodeCancels := make([]context.CancelFunc, numNodes) + nodeWaitErrs := make([]<-chan error, numNodes) + for i := 0; i < numNodes; i++ { + nodeCtxs[i], nodeCancels[i] = context.WithCancel(mainCtx) + } + + // Note we 
assume no runtime error in this test otherwise + // it gets rather complex to handle runtime errors and stop + // the test. + // start block producing node first - node, err := utils.RunGossamer(t, numNodes-1, - utils.TestDir(t, utils.KeyList[numNodes-1]), - utils.GenesisDefault, utils.ConfigDefault, - false, true) + genesisPath := libutils.GetGssmrGenesisRawPathTest(t) + blockProducingConfig := config.Default() + blockProducingConfig.Init.Genesis = genesisPath + blockProducingConfig.Core.BABELead = true + producingNode := node.New(t, blockProducingConfig, node.SetIndex(numNodes-1)) + + err := producingNode.Init(mainCtx) + require.NoError(t, err) + + nodeWaitErrs[0], err = producingNode.StartAndWait(nodeCtxs[0]) + t.Cleanup(func() { + // note we need to use indexes since these + // slice elements might change. + nodeCancels[0]() + <-nodeWaitErrs[0] + }) require.NoError(t, err) // wait and start rest of nodes time.Sleep(time.Second * 5) - nodes, err := utils.InitializeAndStartNodes(t, numNodes-1, utils.GenesisDefault, utils.ConfigNoBABE) - require.NoError(t, err) - nodes = append(nodes, node) + noBabeConfig := config.NoBabe() + noBabeConfig.Init.Genesis = genesisPath + nodes := node.MakeNodes(t, numNodes-1, noBabeConfig) + for i, node := range nodes { + err := node.Init(mainCtx) + require.NoError(t, err) - defer func() { - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + nodeWaitErrs[i+1], err = node.StartAndWait(nodeCtxs[i+1]) + t.Cleanup(func() { + // note we need to use indexes since these + // slice elements might change. + nodeCancels[i+1]() + <-nodeWaitErrs[i+1] + }) + require.NoError(t, err) + } - done := make(chan struct{}) + nodes = append(nodes, producingNode) // randomly turn off and on nodes + onOffRoutineDone := make(chan struct{}) go func() { + defer close(onOffRoutineDone) for { select { case <-time.After(time.Second * 10): idx := rand.Intn(numNodes) - errList := utils.StopNodes(t, nodes[idx:idx+1]) - require.Len(t, errList, 0) - - time.Sleep(time.Second) - - err = utils.StartNodes(t, nodes[idx:idx+1]) - require.NoError(t, err) - case <-done: + // Stop node + nodeCancels[idx]() + <-nodeWaitErrs[idx] + + // Start node + nodeCtxs[idx], nodeCancels[idx] = context.WithCancel(mainCtx) + nodeWaitErrs[idx], err = nodes[idx].Start(nodeCtxs[idx]) + if err != nil { + assert.NoError(t, err) // cannot use require.NoError from a goroutine + mainCancel() // stop all operations + return + } + case <-mainCtx.Done(): return } } }() - ctx := context.Background() - numCmps := 12 for i := 0; i < numCmps; i++ { t.Log("comparing...", i) const compareTimeout = 5 * time.Second - compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) + compareCtx, cancel := context.WithTimeout(mainCtx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, strconv.Itoa(i)) + _, err := compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(i)) + require.NoError(t, err) cancel() time.Sleep(time.Second * 5) } - close(done) + + mainCancel() + <-onOffRoutineDone } func TestSync_SubmitExtrinsic(t *testing.T) { t.Skip() - t.Log("starting gossamer...") + + ctx, cancel := context.WithCancel(context.Background()) // index of node to submit tx to idx := 0 // TODO: randomise this // start block producing node first - node, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[0]), utils.GenesisDev, - utils.ConfigNoGrandpa, false, true) - require.NoError(t, err) - nodes := []utils.Node{node} + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + configNoGrandpa := 
config.NoGrandpa() + configNoGrandpa.Init.Genesis = genesisPath + configNoGrandpa.Core.BABELead = true + producingNode := node.New(t, configNoGrandpa, node.SetIndex(0)) + producingNode.InitAndStartTest(ctx, t, cancel) + + nodes := node.Nodes{producingNode} + + configNoAuthority := config.NotAuthority() // Start rest of nodes - node, err = utils.RunGossamer(t, 1, - utils.TestDir(t, utils.KeyList[1]), utils.GenesisDev, - utils.ConfigNotAuthority, false, false) - require.NoError(t, err) - nodes = append(nodes, node) - node, err = utils.RunGossamer(t, 2, - utils.TestDir(t, utils.KeyList[2]), utils.GenesisDev, - utils.ConfigNotAuthority, false, false) - require.NoError(t, err) - nodes = append(nodes, node) + configNoAuthority.Init.Genesis = genesisPath + n := node.New(t, configNoAuthority, node.SetIndex(1)) + nodes = append(nodes, n) - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + n = node.New(t, configNoAuthority, node.SetIndex(2)) + nodes = append(nodes, n) // send tx to non-authority node - api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", nodes[idx].RPCPort)) + api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("http://localhost:%s", nodes[idx].RPCPort())) require.NoError(t, err) meta, err := api.RPC.State.GetMetadataLatest() @@ -468,12 +501,11 @@ func TestSync_SubmitExtrinsic(t *testing.T) { extEnc, err := types.EncodeToHexString(ext) require.NoError(t, err) - ctx := context.Background() - // get starting header so that we can lookup blocks by number later getChainHeadCtx, getChainHeadCancel := context.WithTimeout(ctx, time.Second) - prevHeader := utils.GetChainHead(getChainHeadCtx, t, nodes[idx].RPCPort) + prevHeader, err := rpc.GetChainHead(getChainHeadCtx, nodes[idx].RPCPort()) getChainHeadCancel() + require.NoError(t, err) // Send the extrinsic hash, err := api.RPC.Author.SubmitExtrinsic(ext) @@ -483,21 +515,33 @@ func TestSync_SubmitExtrinsic(t *testing.T) { time.Sleep(time.Second * 20) // wait until there's no more pending extrinsics - for i := 0; i < maxRetries; i++ { - getPendingExtsCtx, getPendingExtsCancel := context.WithTimeout(ctx, time.Second) + const waitNoExtTimeout = 30 * time.Second + waitNoExtCtx, waitNoExtCancel := context.WithTimeout(ctx, waitNoExtTimeout) + for { + getPendingExtsCtx, getPendingExtsCancel := context.WithTimeout(waitNoExtCtx, time.Second) exts := getPendingExtrinsics(getPendingExtsCtx, t, nodes[idx]) getPendingExtsCancel() if len(exts) == 0 { + waitNoExtCancel() break } - time.Sleep(time.Second) + timer := time.NewTimer(time.Second) + select { + case <-timer.C: + case <-waitNoExtCtx.Done(): + if !timer.Stop() { + <-timer.C + } + require.NoError(t, waitNoExtCtx.Err()) + } } - getChainHeadCtx, cancel := context.WithTimeout(ctx, time.Second) - header := utils.GetChainHead(getChainHeadCtx, t, nodes[idx].RPCPort) - cancel() + getChainHeadCtx, getChainHeadCancel = context.WithTimeout(ctx, time.Second) + header, err := rpc.GetChainHead(getChainHeadCtx, nodes[idx].RPCPort()) + getChainHeadCancel() + require.NoError(t, err) // search from child -> parent blocks for extrinsic var ( @@ -505,18 +549,22 @@ func TestSync_SubmitExtrinsic(t *testing.T) { extInBlock uint ) - for i := 0; i < maxRetries; i++ { - getBlockCtx, getBlockCancel := context.WithTimeout(ctx, time.Second) - block := utils.GetBlock(getBlockCtx, t, nodes[idx].RPCPort, header.ParentHash) + const extrinsicSearchTimeout = 10 * time.Second + extrinsicSearchCtx, extrinsicSearchCancel := 
context.WithTimeout(ctx, extrinsicSearchTimeout) + for { + getBlockCtx, getBlockCancel := context.WithTimeout(extrinsicSearchCtx, time.Second) + block, err := rpc.GetBlock(getBlockCtx, nodes[idx].RPCPort(), header.ParentHash) getBlockCancel() + require.NoError(t, err) + if block == nil { // couldn't get block, increment retry counter continue } header = &block.Header - logger.Debugf("got block with header %s and body %v from node with key %s", header, block.Body, nodes[idx].Key) + logger.Debugf("got block with header %s and body %v from node with key %s", header, block.Body, nodes[idx].Key()) if block.Body != nil { resExts = block.Body @@ -524,6 +572,7 @@ func TestSync_SubmitExtrinsic(t *testing.T) { logger.Debugf("extrinsics: %v", resExts) if len(resExts) >= 2 { extInBlock = block.Header.Number + extrinsicSearchCancel() break } } @@ -546,32 +595,25 @@ func TestSync_SubmitExtrinsic(t *testing.T) { const compareTimeout = 5 * time.Second compareCtx, cancel := context.WithTimeout(ctx, compareTimeout) - _ = compareBlocksByNumber(compareCtx, t, nodes, fmt.Sprint(extInBlock)) + _, err = compareBlocksByNumber(compareCtx, nodes, fmt.Sprint(extInBlock)) + require.NoError(t, err) cancel() } func Test_SubmitAndWatchExtrinsic(t *testing.T) { - t.Log("starting gossamer...") - - // index of node to submit tx to - idx := 0 // TODO: randomise this - // start block producing node first - node, err := utils.RunGossamer(t, 0, - utils.TestDir(t, utils.KeyList[0]), - utils.GenesisDev, utils.ConfigNoGrandpa, true, true) - require.NoError(t, err) - nodes := []utils.Node{node} - - defer func() { - t.Log("going to tear down gossamer...") - errList := utils.StopNodes(t, nodes) - require.Len(t, errList, 0) - }() + genesisPath := libutils.GetDevGenesisSpecPathTest(t) + tomlConfig := config.NoGrandpa() + tomlConfig.Init.Genesis = genesisPath + tomlConfig.RPC.WS = true + tomlConfig.Core.BABELead = true + producingNode := node.New(t, tomlConfig, node.SetIndex(0)) + ctx, cancel := context.WithCancel(context.Background()) + producingNode.InitAndStartTest(ctx, t, cancel) // send tx to non-authority node - api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("ws://localhost:%s", nodes[idx].WSPort)) + api, err := gsrpc.NewSubstrateAPI(fmt.Sprintf("ws://localhost:%s", producingNode.WSPort())) require.NoError(t, err) meta, err := api.RPC.State.GetMetadataLatest() @@ -716,6 +758,13 @@ func TestSync_SubmitExtrinsicLoad(t *testing.T) { } func TestStress_SecondarySlotProduction(t *testing.T) { + rootPath, err := libutils.GetProjectRootPath() + require.NoError(t, err) + + // genesis_two_auths_secondaryvrf_0_9_10.json has 2 authorities and block production by + // secondary VRF slots enabled + genesisTwoAuthsSecondaryVRF0_9_10 := filepath.Join(rootPath, "tests/utils/genesis_two_auths_secondaryvrf_0_9_10.json") + testcases := []struct { description string genesis string @@ -723,35 +772,38 @@ func TestStress_SecondarySlotProduction(t *testing.T) { }{ { description: "with secondary vrf slots enabled", - genesis: utils.GenesisTwoAuthsSecondaryVRF0_9_10, + genesis: genesisTwoAuthsSecondaryVRF0_9_10, allowedSlots: gosstypes.PrimaryAndSecondaryVRFSlots, }, } const numNodes = 2 for _, c := range testcases { t.Run(c.description, func(t *testing.T) { - nodes, err := utils.InitializeAndStartNodes(t, numNodes, c.genesis, utils.ConfigDefault) - require.NoError(t, err) - defer utils.StopNodes(t, nodes) + tomlConfig := config.Default() + tomlConfig.Init.Genesis = c.genesis + + nodes := node.MakeNodes(t, numNodes, tomlConfig) + + ctx, cancel := 
context.WithCancel(context.Background()) + nodes.InitAndStartTest(ctx, t, cancel) primaryCount := 0 secondaryPlainCount := 0 secondaryVRFCount := 0 - ctx := context.Background() - for i := 1; i < 10; i++ { fmt.Printf("%d iteration\n", i) getBlockHashCtx, cancel := context.WithTimeout(ctx, time.Second) - hash, err := utils.GetBlockHash(getBlockHashCtx, t, nodes[0].RPCPort, fmt.Sprintf("%d", i)) + hash, err := rpc.GetBlockHash(getBlockHashCtx, nodes[0].RPCPort(), fmt.Sprint(i)) cancel() require.NoError(t, err) getBlockCtx, cancel := context.WithTimeout(ctx, time.Second) - block := utils.GetBlock(getBlockCtx, t, nodes[0].RPCPort, hash) + block, err := rpc.GetBlock(getBlockCtx, nodes[0].RPCPort(), hash) cancel() + require.NoError(t, err) header := block.Header diff --git a/tests/sync/sync_test.go b/tests/sync/sync_test.go index ff9512b8f2..69880ef743 100644 --- a/tests/sync/sync_test.go +++ b/tests/sync/sync_test.go @@ -5,18 +5,14 @@ package sync import ( "context" - "fmt" - "log" - "os" "testing" "time" "github.com/ChainSafe/gossamer/tests/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/stretchr/testify/require" ) -var framework utils.Framework - type testRPCCall struct { nodeIdx int method string @@ -46,27 +42,33 @@ var checks = []checkDBCall{ {call1idx: 3, call2idx: 5, field: "parentHash"}, } -func TestMain(m *testing.M) { +// this starts nodes and runs RPC calls (which loads db) +func TestCalls(t *testing.T) { if utils.MODE != "sync" { - fmt.Println("Going to skip stress test") - return - } - fw, err := utils.InitFramework(3) - if err != nil { - log.Fatal(fmt.Errorf("error initialising test framework")) + t.Skip("MODE != 'sync', skipping stress test") } - framework = *fw - // Start all tests - code := m.Run() - os.Exit(code) -} -// this starts nodes and runs RPC calls (which loads db) -func TestCalls(t *testing.T) { ctx := context.Background() - err := framework.StartNodes(t) - require.Len(t, err, 0) + const qtyNodes = 3 + tomlConfig := config.Default() + framework, err := utils.InitFramework(ctx, t, qtyNodes, tomlConfig) + + require.NoError(t, err) + + nodesCtx, nodesCancel := context.WithCancel(ctx) + + runtimeErrors, startErr := framework.StartNodes(nodesCtx, t) + + t.Cleanup(func() { + nodesCancel() + for _, runtimeError := range runtimeErrors { + <-runtimeError + } + }) + + require.NoError(t, startErr) + for _, call := range tests { time.Sleep(call.delay) @@ -87,7 +89,4 @@ func TestCalls(t *testing.T) { res := framework.CheckEqual(check.call1idx, check.call2idx, check.field) require.True(t, res) } - - err = framework.KillNodes(t) - require.Len(t, err, 0) } diff --git a/tests/utils/chain.go b/tests/utils/chain.go deleted file mode 100644 index 055d888316..0000000000 --- a/tests/utils/chain.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/rpc/modules" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/require" -) - -// GetChainHead calls the endpoint chain_getHeader to get the latest chain head -func GetChainHead(ctx context.Context, t *testing.T, rpcPort string) *types.Header { - endpoint := NewEndpoint(rpcPort) - const params = "[]" - respBody, err := PostRPC(ctx, endpoint, ChainGetHeader, params) - require.NoError(t, err) - - header := new(modules.ChainBlockHeaderResponse) - err = DecodeRPC(t, respBody, 
header) - require.NoError(t, err) - - return headerResponseToHeader(t, header) -} - -// GetChainHeadWithError calls the endpoint chain_getHeader to get the latest chain head -func GetChainHeadWithError(ctx context.Context, t *testing.T, rpcPort string) (*types.Header, error) { - endpoint := NewEndpoint(rpcPort) - const params = "[]" - respBody, err := PostRPC(ctx, endpoint, ChainGetHeader, params) - require.NoError(t, err) - - header := new(modules.ChainBlockHeaderResponse) - err = DecodeRPC(t, respBody, header) - if err != nil { - return nil, err - } - - return headerResponseToHeader(t, header), nil -} - -// GetBlockHash calls the endpoint chain_getBlockHash to get the latest chain head. -// It will block until a response is received or the context gets canceled. -func GetBlockHash(ctx context.Context, t *testing.T, rpcPort, num string) (common.Hash, error) { - endpoint := NewEndpoint(rpcPort) - params := "[" + num + "]" - const requestWait = time.Second - respBody, err := PostRPCWithRetry(ctx, endpoint, ChainGetBlockHash, params, requestWait) - require.NoError(t, err) - - var hash string - err = DecodeRPC(t, respBody, &hash) - if err != nil { - return common.Hash{}, err - } - return common.MustHexToHash(hash), nil -} - -// GetFinalizedHead calls the endpoint chain_getFinalizedHead to get the latest finalised head -func GetFinalizedHead(ctx context.Context, t *testing.T, rpcPort string) common.Hash { - endpoint := NewEndpoint(rpcPort) - method := ChainGetFinalizedHead - const params = "[]" - respBody, err := PostRPC(ctx, endpoint, method, params) - require.NoError(t, err) - - var hash string - err = DecodeRPC(t, respBody, &hash) - require.NoError(t, err) - return common.MustHexToHash(hash) -} - -// GetFinalizedHeadByRound calls the endpoint chain_getFinalizedHeadByRound to get the finalised head at a given round -// TODO: add setID, hard-coded at 1 for now -func GetFinalizedHeadByRound(ctx context.Context, t *testing.T, rpcPort string, round uint64) (common.Hash, error) { - p := strconv.Itoa(int(round)) - endpoint := NewEndpoint(rpcPort) - method := ChainGetFinalizedHeadByRound - params := "[" + p + ",1]" - respBody, err := PostRPC(ctx, endpoint, method, params) - require.NoError(t, err) - - var hash string - err = DecodeRPC(t, respBody, &hash) - if err != nil { - return common.Hash{}, err - } - - return common.MustHexToHash(hash), nil -} - -// GetBlock calls the endpoint chain_getBlock -func GetBlock(ctx context.Context, t *testing.T, rpcPort string, hash common.Hash) *types.Block { - endpoint := NewEndpoint(rpcPort) - method := ChainGetBlock - params := fmt.Sprintf(`["%s"]`, hash) - respBody, err := PostRPC(ctx, endpoint, method, params) - require.NoError(t, err) - - block := new(modules.ChainBlockResponse) - err = DecodeRPC(t, respBody, block) - if err != nil { - return nil - } - - header := block.Block.Header - - parentHash, err := common.HexToHash(header.ParentHash) - require.NoError(t, err) - - nb, err := common.HexToBytes(header.Number) - require.NoError(t, err) - number := common.BytesToUint(nb) - - stateRoot, err := common.HexToHash(header.StateRoot) - require.NoError(t, err) - - extrinsicsRoot, err := common.HexToHash(header.ExtrinsicsRoot) - require.NoError(t, err) - - h, err := types.NewHeader(parentHash, stateRoot, extrinsicsRoot, number, rpcLogsToDigest(t, header.Digest.Logs)) - require.NoError(t, err) - - b, err := types.NewBodyFromExtrinsicStrings(block.Block.Body) - require.NoError(t, err, fmt.Sprintf("%v", block.Block.Body)) - - return &types.Block{ - Header: *h, - Body: 
*b, - } -} diff --git a/tests/utils/common.go b/tests/utils/common.go index 7948583c4b..1a4b4805b7 100644 --- a/tests/utils/common.go +++ b/tests/utils/common.go @@ -4,7 +4,6 @@ package utils import ( - "encoding/json" "os" ) @@ -12,59 +11,9 @@ var ( // MODE is the value for the environnent variable MODE. MODE = os.Getenv("MODE") - // HOSTNAME is the value for the environnent variable HOSTNAME. - HOSTNAME = os.Getenv("HOSTNAME") // PORT is the value for the environnent variable PORT. PORT = os.Getenv("PORT") // LOGLEVEL is the value for the environnent variable LOGLEVEL. LOGLEVEL = os.Getenv("LOG") - - // NETWORK_SIZE is the value for the environnent variable NETWORK_SIZE. - NETWORK_SIZE = os.Getenv("NETWORK_SIZE") //nolint:revive ) - -// ServerResponse wraps the RPC response -type ServerResponse struct { - // JSON-RPC Version - Version string `json:"jsonrpc"` - // Resulting values - Result json.RawMessage `json:"result"` - // Any generated errors - Error *Error `json:"error"` - // Request id - ID *json.RawMessage `json:"id"` -} - -// WebsocketResponse wraps the Websocket response -type WebsocketResponse struct { - // JSON-RPC Version - Version string `json:"jsonrpc"` - // Method name called - Method string `json:"method"` - // Resulting values - Result json.RawMessage `json:"result"` - // Params values including results - Params json.RawMessage `json:"params"` - // Any generated errors - Error *Error `json:"error"` - // Request id - Subscription *json.RawMessage `json:"subscription"` - // Request id - ID *json.RawMessage `json:"id"` -} - -// ErrCode is a int type used for the rpc error codes -type ErrCode int - -// Error is a struct that holds the error message and the error code for a error -type Error struct { - Message string `json:"message"` - ErrorCode ErrCode `json:"code"` - Data map[string]interface{} `json:"data"` -} - -// Error returns the error Message string -func (e *Error) Error() string { - return e.Message -} diff --git a/tests/utils/config/config.go b/tests/utils/config/config.go new file mode 100644 index 0000000000..81499e8ac6 --- /dev/null +++ b/tests/utils/config/config.go @@ -0,0 +1,51 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package config + +import ( + "github.com/ChainSafe/gossamer/dot/config/toml" +) + +// LogGrandpa generates a grandpa config. +func LogGrandpa() (cfg toml.Config) { + cfg = Default() + cfg.Log = toml.LogConfig{ + CoreLvl: "crit", + NetworkLvl: "debug", + RuntimeLvl: "crit", + BlockProducerLvl: "info", + FinalityGadgetLvl: "debug", + } + return cfg +} + +// NoBabe generates a no-babe config. +func NoBabe() (cfg toml.Config) { + cfg = Default() + cfg.Global.LogLvl = "info" + cfg.Log = toml.LogConfig{ + SyncLvl: "debug", + NetworkLvl: "debug", + } + cfg.Core.BabeAuthority = false + return cfg +} + +// NoGrandpa generates a no-grandpa config. +func NoGrandpa() (cfg toml.Config) { + cfg = Default() + cfg.Core.GrandpaAuthority = false + cfg.Core.BABELead = true + cfg.Core.GrandpaInterval = 1 + return cfg +} + +// NotAuthority generates a non-authority config.
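NotAuthority, completed below, rounds out the presets; all of them derive from Default() and override only a few fields, and tests tweak the result once more before spawning nodes. For example, the single-block-producer setup used repeatedly in this patch looks like this (genesisPath is assumed to come from the libutils genesis path helpers seen earlier):

    // Example: a single block-producing node without grandpa,
    // mirroring the call sites in the stress tests above.
    tomlConfig := config.NoGrandpa()
    tomlConfig.Init.Genesis = genesisPath
    tomlConfig.Core.BABELead = true

    n := node.New(t, tomlConfig)
    ctx, cancel := context.WithCancel(context.Background())
    n.InitAndStartTest(ctx, t, cancel)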
+func NotAuthority() (cfg toml.Config) { + cfg = Default() + cfg.Core.Roles = 1 + cfg.Core.BabeAuthority = false + cfg.Core.GrandpaAuthority = false + return cfg +} diff --git a/tests/utils/config/default.go b/tests/utils/config/default.go new file mode 100644 index 0000000000..ba18ce7757 --- /dev/null +++ b/tests/utils/config/default.go @@ -0,0 +1,52 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package config + +import ( + "github.com/ChainSafe/gossamer/dot/config/toml" +) + +// Default returns a default TOML configuration for Gossamer. +func Default() toml.Config { + return toml.Config{ + Global: toml.GlobalConfig{ + Name: "Gossamer", + ID: "gssmr", + LogLvl: "info", + MetricsAddress: "localhost:9876", + RetainBlocks: 256, + Pruning: "archive", + }, + Log: toml.LogConfig{ + CoreLvl: "info", + SyncLvl: "info", + }, + Account: toml.AccountConfig{ + Key: "", + Unlock: "", + }, + Core: toml.CoreConfig{ + Roles: 4, + BabeAuthority: true, + GrandpaAuthority: true, + GrandpaInterval: 1, + }, + Network: toml.NetworkConfig{ + Bootnodes: nil, + ProtocolID: "/gossamer/gssmr/0", + NoBootstrap: false, + NoMDNS: false, + MinPeers: 1, + MaxPeers: 3, + }, + RPC: toml.RPCConfig{ + Enabled: true, + Unsafe: true, + WSUnsafe: true, + Host: "localhost", + Modules: []string{"system", "author", "chain", "state", "dev", "rpc"}, + WS: false, + }, + } +} diff --git a/tests/utils/config/write.go b/tests/utils/config/write.go new file mode 100644 index 0000000000..e4f1dc4c75 --- /dev/null +++ b/tests/utils/config/write.go @@ -0,0 +1,27 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package config + +import ( + "os" + "path/filepath" + "testing" + + ctoml "github.com/ChainSafe/gossamer/dot/config/toml" + "github.com/naoina/toml" + "github.com/stretchr/testify/require" +) + +// Write writes the toml configuration to a file +// in a temporary test directory which gets removed at +// the end of the test. 
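Write, whose body follows, bridges these in-memory presets and tests that need a real file, for instance to hand a --config flag to a gossamer binary. A short usage sketch under that assumption:

    // Sketch: persist a tweaked preset and use the resulting path.
    cfg := config.Default()
    cfg.Core.BABELead = true
    configPath := config.Write(t, cfg)
    // configPath now points at a TOML file inside t.TempDir(), which
    // the testing package removes automatically when the test ends.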
+func Write(t *testing.T, cfg ctoml.Config) (configPath string) { + t.Helper() + configPath = filepath.Join(t.TempDir(), "config.toml") + raw, err := toml.Marshal(cfg) + require.NoError(t, err) + err = os.WriteFile(configPath, raw, os.ModePerm) + require.NoError(t, err) + return configPath +} diff --git a/tests/utils/dev.go b/tests/utils/dev.go deleted file mode 100644 index 43680e10dd..0000000000 --- a/tests/utils/dev.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "context" - "encoding/binary" - "strconv" - "testing" - "time" - - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/require" -) - -// PauseBABE calls the endpoint dev_control with the params ["babe", "stop"] -func PauseBABE(ctx context.Context, rpcPort string) error { - endpoint := NewEndpoint(rpcPort) - const params = `["babe", "stop"]` - _, err := PostRPC(ctx, endpoint, DevControl, params) - return err -} - -// SlotDuration Calls dev endpoint for slot duration -func SlotDuration(ctx context.Context, t *testing.T, rpcPort string) time.Duration { - endpoint := NewEndpoint(rpcPort) - const method = "dev_slotDuration" - const params = "[]" - slotDuration, err := PostRPC(ctx, endpoint, method, params) - - if err != nil { - require.NoError(t, err) - } - - slotDurationDecoded := new(string) - err = DecodeRPC(t, slotDuration, slotDurationDecoded) - require.NoError(t, err) - - slotDurationParsed := binary.LittleEndian.Uint64(common.MustHexToBytes(*slotDurationDecoded)) - duration, err := time.ParseDuration(strconv.Itoa(int(slotDurationParsed)) + "ms") - require.NoError(t, err) - return duration -} - -// EpochLength Calls dev endpoint for epoch length -func EpochLength(ctx context.Context, t *testing.T, rpcPort string) uint64 { - endpoint := NewEndpoint(rpcPort) - const method = "dev_epochLength" - const params = "[]" - epochLength, err := PostRPC(ctx, endpoint, method, params) - if err != nil { - require.NoError(t, err) - } - - epochLengthDecoded := new(string) - err = DecodeRPC(t, epochLength, epochLengthDecoded) - require.NoError(t, err) - - epochLengthParsed := binary.LittleEndian.Uint64(common.MustHexToBytes(*epochLengthDecoded)) - return epochLengthParsed -} diff --git a/tests/utils/framework.go b/tests/utils/framework.go index 135aae3698..febb137a88 100644 --- a/tests/utils/framework.go +++ b/tests/utils/framework.go @@ -6,34 +6,40 @@ package utils import ( "context" "fmt" - "os" "strconv" "testing" + "github.com/ChainSafe/gossamer/dot/config/toml" + "github.com/ChainSafe/gossamer/tests/utils/node" + "github.com/ChainSafe/gossamer/tests/utils/rpc" scribble "github.com/nanobox-io/golang-scribble" ) // Framework struct to hold references to framework data type Framework struct { - nodes []Node + nodes node.Nodes db *scribble.Driver callQty int } -// InitFramework creates given quanity of nodes -func InitFramework(qtyNodes int) (*Framework, error) { +// NewFramework creates a new framework. 
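The dev.go helpers removed above do not simply disappear: judging by the new call sites in the sync tests, PauseBABE, SlotDuration and EpochLength re-emerge in tests/utils/rpc with error returns in place of require assertions. Porting the removed SlotDuration body onto the new signature gives roughly the following sketch (details of the real tests/utils/rpc package may differ):

    // Within the assumed tests/utils/rpc package; needs the context,
    // encoding/binary, fmt and time imports plus gossamer's lib/common.
    func SlotDuration(ctx context.Context, rpcPort string) (
        slotDuration time.Duration, err error) {
        endpoint := NewEndpoint(rpcPort)
        const method = "dev_slotDuration"
        const params = "[]"

        respBody, err := Post(ctx, endpoint, method, params)
        if err != nil {
            return 0, fmt.Errorf("cannot post RPC: %w", err)
        }

        var slotDurationString string
        if err := Decode(respBody, &slotDurationString); err != nil {
            return 0, fmt.Errorf("cannot decode RPC response: %w", err)
        }

        // The node returns the slot duration in milliseconds as a
        // little-endian uint64 encoded in hexadecimal.
        b, err := common.HexToBytes(slotDurationString)
        if err != nil {
            return 0, fmt.Errorf("malformed slot duration: %w", err)
        }

        slotDurationUint64 := binary.LittleEndian.Uint64(b)
        return time.Duration(slotDurationUint64) * time.Millisecond, nil
    }

EpochLength presumably follows the same post-decode-parse pattern against dev_epochLength, returning the raw uint64 instead of a duration.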
+func NewFramework() (framework *Framework) { + return &Framework{} +} + +// InitFramework creates given quantity of nodes +func InitFramework(ctx context.Context, t *testing.T, qtyNodes int, + tomlConfig toml.Config) (*Framework, error) { f := &Framework{} - nodes, err := InitNodes(qtyNodes, ConfigDefault) - if err != nil { - return nil, err - } - f.nodes = nodes - tempDir, err := os.MkdirTemp("", "gossamer-stress-db") + f.nodes = node.MakeNodes(t, qtyNodes, tomlConfig) + + err := f.nodes.Init(ctx) if err != nil { - return nil, err + return nil, fmt.Errorf("cannot init nodes: %w", err) } - db, err := scribble.New(tempDir, nil) + + db, err := scribble.New(t.TempDir(), nil) if err != nil { return nil, err } @@ -43,20 +49,9 @@ func InitFramework(qtyNodes int) (*Framework, error) { } // StartNodes calls RestartGossamor for all nodes -func (fw *Framework) StartNodes(t *testing.T) (errorList []error) { - for i, node := range fw.nodes { - var err error - fw.nodes[i], err = startGossamer(t, node, false) - if err != nil { - errorList = append(errorList, err) - } - } - return errorList -} - -// KillNodes stops all running nodes -func (fw *Framework) KillNodes(t *testing.T) []error { - return TearDown(t, fw.nodes) +func (fw *Framework) StartNodes(ctx context.Context, t *testing.T) ( + runtimeErrors []<-chan error, startErr error) { + return fw.nodes.Start(ctx) } // CallRPC call RPC method with given params for node at idx @@ -66,12 +61,12 @@ func (fw *Framework) CallRPC(ctx context.Context, idx int, method, params string return nil, fmt.Errorf("node index greater than quantity of nodes") } node := fw.nodes[idx] - respBody, err := PostRPC(ctx, NewEndpoint(node.RPCPort), method, params) + respBody, err := rpc.Post(ctx, rpc.NewEndpoint(node.RPCPort()), method, params) if err != nil { return nil, err } - err = DecodeRPC_NT(respBody, &respJSON) + err = rpc.Decode(respBody, &respJSON) if err != nil { return nil, fmt.Errorf("error making RPC call %v", err) } diff --git a/tests/utils/gossamer_utils.go b/tests/utils/gossamer_utils.go index b229920bb1..d513b05010 100644 --- a/tests/utils/gossamer_utils.go +++ b/tests/utils/gossamer_utils.go @@ -4,572 +4,34 @@ package utils import ( - "bufio" - "context" - "fmt" - "io" "os" - "os/exec" "path/filepath" - "strconv" - "sync" "testing" - "time" "github.com/ChainSafe/gossamer/dot" - ctoml "github.com/ChainSafe/gossamer/dot/config/toml" - "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/utils" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Logger is the utils package local logger. var Logger = log.NewFromGlobal(log.AddContext("pkg", "test/utils")) -var maxRetries = 24 -var ( - // KeyList is the list of built-in keys - KeyList = []string{"alice", "bob", "charlie", "dave", "eve", "ferdie", "george", "heather", "ian"} - basePort = 7000 +// GenerateGenesisAuths generates a genesis file with numAuths authorities +// and returns the file path to the genesis file. The genesis file is +// automatically removed when the test ends. 
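The chain helpers GetChainHead, GetBlockHash, GetFinalizedHead and GetBlock, deleted earlier from tests/utils/chain.go, migrate the same way, again inferred only from their new call sites. A sketch of the error-returning GetChainHead, ported from the removed GetChainHeadWithError body and assuming headerResponseToHeader also gained an error return:

    // Sketch of rpc.GetChainHead; the real tests/utils/rpc code
    // may structure the conversion differently.
    func GetChainHead(ctx context.Context, rpcPort string) (
        header *types.Header, err error) {
        endpoint := NewEndpoint(rpcPort)
        const method = "chain_getHeader"
        const params = "[]"

        respBody, err := Post(ctx, endpoint, method, params)
        if err != nil {
            return nil, fmt.Errorf("cannot post RPC: %w", err)
        }

        headerResponse := new(modules.ChainBlockHeaderResponse)
        if err := Decode(respBody, headerResponse); err != nil {
            return nil, fmt.Errorf("cannot decode RPC response: %w", err)
        }

        return headerResponseToHeader(headerResponse)
    }

GetBlockHash and the finalised-head helpers plausibly follow the same post-and-decode pattern with their respective RPC methods and parameters.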
+func GenerateGenesisAuths(t *testing.T, numAuths int) (genesisPath string) { + gssmrGenesisPath := utils.GetGssmrGenesisPathTest(t) - // BaseRPCPort is the starting RPC port for test nodes - BaseRPCPort = 8540 + buildSpec, err := dot.BuildFromGenesis(gssmrGenesisPath, numAuths) + require.NoError(t, err) - // BaseWSPort is the starting Websocket port for test nodes - BaseWSPort = 8546 + buildSpecJSON, err := buildSpec.ToJSONRaw() + require.NoError(t, err) - currentDir, _ = os.Getwd() - gossamerCMD = filepath.Join(currentDir, "../..", "bin/gossamer") + genesisPath = filepath.Join(t.TempDir(), "genesis.json") + err = os.WriteFile(genesisPath, buildSpecJSON, os.ModePerm) + require.NoError(t, err) - // GenesisOneAuth is the genesis file that has 1 authority - GenesisOneAuth = filepath.Join(currentDir, "../utils/genesis_oneauth.json") - // GenesisThreeAuths is the genesis file that has 3 authorities - GenesisThreeAuths = filepath.Join(currentDir, "../utils/genesis_threeauths.json") - // GenesisTwoAuthsSecondaryVRF0_9_10 is the genesis file that has 2 authorities and block production by - // secondary VRF slots enabled - GenesisTwoAuthsSecondaryVRF0_9_10 = filepath.Join(currentDir, "../utils/genesis_two_auths_secondaryvrf_0_9_10.json") - - // GenesisSixAuths is the genesis file that has 6 authorities - GenesisSixAuths = filepath.Join(currentDir, "../utils/genesis_sixauths.json") - // GenesisDefault is the default gssmr genesis file - GenesisDefault = filepath.Join(currentDir, "../..", "chain/gssmr/genesis.json") - // GenesisDev is the default dev genesis file - GenesisDev = filepath.Join(currentDir, "../..", "chain/dev/genesis-spec.json") - - // ConfigDefault is the default config file - ConfigDefault = filepath.Join(currentDir, "../utils/config_default.toml") - // ConfigLogGrandpa is a config file where log levels are set to CRIT except for GRANDPA - ConfigLogGrandpa = filepath.Join(currentDir, "../utils/config_log_grandpa.toml") - // ConfigNoBABE is a config file with BABE disabled - ConfigNoBABE = filepath.Join(currentDir, "../utils/config_nobabe.toml") - // ConfigNoGrandpa is a config file with grandpa disabled - ConfigNoGrandpa = filepath.Join(currentDir, "../utils/config_nograndpa.toml") - // ConfigNotAuthority is a config file with no authority functionality - ConfigNotAuthority = filepath.Join(currentDir, "../utils/config_notauthority.toml") -) - -// Node represents a gossamer process -type Node struct { - Process *exec.Cmd - Key string - RPCPort string - Idx int - basePath string - config string - WSPort string - BABELead bool -} - -// InitGossamer initialises given node number and returns node reference -func InitGossamer(idx int, basePath, genesis, config string) ( - node Node, err error) { - cmdInit := exec.Command(gossamerCMD, "init", - "--config", config, - "--basepath", basePath, - "--genesis", genesis, - "--force", - ) - - Logger.Info("initialising gossamer using " + cmdInit.String() + "...") - stdOutInit, err := cmdInit.CombinedOutput() - if err != nil { - fmt.Printf("%s", stdOutInit) - return node, err - } - - Logger.Infof("initialised gossamer node %d!", idx) - return Node{ - Idx: idx, - RPCPort: strconv.Itoa(BaseRPCPort + idx), - WSPort: strconv.Itoa(BaseWSPort + idx), - basePath: basePath, - config: config, - }, nil -} - -// startGossamer starts given node -func startGossamer(t *testing.T, node Node, websocket bool) ( - updatedNode Node, err error) { - var key string - var params = []string{"--port", strconv.Itoa(basePort + node.Idx), - "--config", node.config, - 
"--basepath", node.basePath, - "--rpchost", HOSTNAME, - "--rpcport", node.RPCPort, - "--rpcmods", "system,author,chain,state,dev,rpc", - "--rpc", - "--no-telemetry", - "--log", "info"} - - if node.BABELead { - params = append(params, "--babe-lead") - } - - if node.Idx >= len(KeyList) { - params = append(params, "--roles", "1") - } else { - key = KeyList[node.Idx] - params = append(params, "--roles", "4", - "--key", key) - } - - if websocket { - params = append(params, "--ws", - "--wsport", node.WSPort) - } - node.Process = exec.Command(gossamerCMD, params...) - - node.Key = key - - Logger.Infof("node basepath: %s", node.basePath) - // create log file - outfile, err := os.Create(filepath.Join(node.basePath, "log.out")) - if err != nil { - Logger.Errorf("Error when trying to set a log file for gossamer output: %s", err) - return node, err - } - - // create error log file - errfile, err := os.Create(filepath.Join(node.basePath, "error.out")) - if err != nil { - Logger.Errorf("Error when trying to set a log file for gossamer output: %s", err) - return node, err - } - - t.Cleanup(func() { - time.Sleep(time.Second) // wait for goroutine to finish writing - err = outfile.Close() - assert.NoError(t, err) - err = errfile.Close() - assert.NoError(t, err) - }) - - stdoutPipe, err := node.Process.StdoutPipe() - if err != nil { - Logger.Errorf("failed to get stdoutPipe from node %d: %s", node.Idx, err) - return node, err - } - - stderrPipe, err := node.Process.StderrPipe() - if err != nil { - Logger.Errorf("failed to get stderrPipe from node %d: %s", node.Idx, err) - return node, err - } - - Logger.Infof("starting gossamer at %s...", node.Process) - err = node.Process.Start() - if err != nil { - Logger.Errorf("Could not execute gossamer cmd: %s", err) - return node, err - } - - writer := bufio.NewWriter(outfile) - go func() { - _, err := io.Copy(writer, stdoutPipe) - if err != nil { - Logger.Errorf("failed copying stdout to writer: %s", err) - } - }() - errWriter := bufio.NewWriter(errfile) - go func() { - _, err := io.Copy(errWriter, stderrPipe) - if err != nil { - Logger.Errorf("failed copying stderr to writer: %s", err) - } - }() - - ctx := context.Background() - - var started bool - for i := 0; i < maxRetries; i++ { - time.Sleep(time.Second * 5) - - const checkNodeStartedTimeout = time.Second - checkNodeCtx, cancel := context.WithTimeout(ctx, checkNodeStartedTimeout) - - addr := fmt.Sprintf("http://%s:%s", HOSTNAME, node.RPCPort) - err = checkNodeStarted(checkNodeCtx, t, addr) - - cancel() - - if err == nil { - started = true - break - } - } - - if started { - Logger.Infof("node started with key %s and cmd.Process.Pid %d", key, node.Process.Process.Pid) - } else { - Logger.Criticalf("node didn't start: %s", err) - errFileContents, _ := os.ReadFile(errfile.Name()) - t.Logf("%s\n", errFileContents) - return node, err - } - - return node, nil -} - -// RunGossamer will initialise and start a gossamer instance -func RunGossamer(t *testing.T, idx int, basepath, genesis, config string, websocket, babeLead bool) ( - node Node, err error) { - node, err = InitGossamer(idx, basepath, genesis, config) - if err != nil { - return node, fmt.Errorf("could not initialise gossamer: %w", err) - } - - if idx == 0 || babeLead { - node.BABELead = true - } - - node, err = startGossamer(t, node, websocket) - if err != nil { - return node, fmt.Errorf("could not start gossamer: %w", err) - } - - return node, nil -} - -// checkNodeStarted check if gossamer node is started -func checkNodeStarted(ctx context.Context, t 
*testing.T, gossamerHost string) error { - const method = "system_health" - const params = "{}" - respBody, err := PostRPC(ctx, gossamerHost, method, params) - if err != nil { - return err - } - - target := new(modules.SystemHealthResponse) - err = DecodeRPC(t, respBody, target) - if err != nil { - return err - } - - if !target.ShouldHavePeers { - return fmt.Errorf("no peers") - } - - return nil -} - -// killProcess kills a instance of gossamer -func killProcess(t *testing.T, cmd *exec.Cmd) error { - err := cmd.Process.Kill() - if err != nil { - t.Log("failed to kill process", "cmd", cmd) - } - return err -} - -// InitNodes initialises given number of nodes -func InitNodes(num int, config string) (nodes []Node, err error) { - tempDir, err := os.MkdirTemp("", "gossamer-stress-") - if err != nil { - return nil, err - } - - for i := 0; i < num; i++ { - node, err := InitGossamer(i, tempDir+strconv.Itoa(i), GenesisDefault, config) - if err != nil { - Logger.Errorf("failed to initialise Gossamer for node index %d", i) - return nil, err - } - - nodes = append(nodes, node) - } - return nodes, nil -} - -// StartNodes starts given array of nodes -func StartNodes(t *testing.T, nodes []Node) (err error) { - for i, n := range nodes { - nodes[i], err = startGossamer(t, n, false) - if err != nil { - return fmt.Errorf("node %d of %d: %w", - i+1, len(nodes), err) - } - } - return nil -} - -// InitializeAndStartNodes will spin up `num` gossamer nodes -func InitializeAndStartNodes(t *testing.T, num int, genesis, config string) ( - nodes []Node, err error) { - var wg sync.WaitGroup - var nodesMutex, errMutex sync.Mutex - wg.Add(num) - - for i := 0; i < num; i++ { - go func(i int) { - defer wg.Done() - name := strconv.Itoa(i) - if i < len(KeyList) { - name = KeyList[i] - } - node, runErr := RunGossamer(t, i, TestDir(t, name), genesis, config, false, false) - if runErr != nil { - errMutex.Lock() - if err == nil { - err = fmt.Errorf("failed to run Gossamer for node index %d: %w", i, runErr) - } - errMutex.Unlock() - return - } - - nodesMutex.Lock() - nodes = append(nodes, node) - nodesMutex.Unlock() - }(i) - } - - wg.Wait() - - if err != nil { - _ = StopNodes(t, nodes) - return nil, err - } - - return nodes, nil -} - -// InitializeAndStartNodesWebsocket will spin up `num` gossamer nodes running with Websocket rpc enabled -func InitializeAndStartNodesWebsocket(t *testing.T, num int, genesis, config string) ( - nodes []Node, err error) { - var nodesMutex, errMutex sync.Mutex - var wg sync.WaitGroup - - wg.Add(num) - - for i := 0; i < num; i++ { - go func(i int) { - defer wg.Done() - name := strconv.Itoa(i) - if i < len(KeyList) { - name = KeyList[i] - } - node, runErr := RunGossamer(t, i, TestDir(t, name), genesis, config, true, false) - if runErr != nil { - errMutex.Lock() - if err == nil { - err = fmt.Errorf("failed to run Gossamer for node index %d: %w", i, runErr) - } - errMutex.Unlock() - return - } - - nodesMutex.Lock() - nodes = append(nodes, node) - nodesMutex.Unlock() - }(i) - } - - wg.Wait() - - if err != nil { - _ = StopNodes(t, nodes) - return nil, err - } - - return nodes, nil -} - -// StopNodes stops the given nodes -func StopNodes(t *testing.T, nodes []Node) (errs []error) { - for i := range nodes { - cmd := nodes[i].Process - err := killProcess(t, cmd) - if err != nil { - Logger.Errorf("failed to kill Gossamer (cmd %s) for node index %d", cmd, i) - errs = append(errs, err) - } - } - - return errs -} - -// TearDown stops the given nodes and remove their datadir -func TearDown(t *testing.T, nodes 
[]Node) (errorList []error) { - for i, node := range nodes { - cmd := nodes[i].Process - err := killProcess(t, cmd) - if err != nil { - Logger.Errorf("failed to kill Gossamer (cmd %s) for node index %d", cmd, i) - errorList = append(errorList, err) - } - - err = os.RemoveAll(node.basePath) - if err != nil { - Logger.Error("failed to remove base path directory " + node.basePath) - errorList = append(errorList, err) - } - } - - return errorList -} - -// TestDir returns the test directory path /test_data// -func TestDir(t *testing.T, name string) string { - return filepath.Join("/tmp/", t.Name(), name) -} - -// GenerateGenesisThreeAuth generates Genesis file with three authority. -func GenerateGenesisThreeAuth() { - genesisPath, err := utils.GetGssmrGenesisPath() - if err != nil { - panic(err) - } - - bs, err := dot.BuildFromGenesis(genesisPath, 3) - if err != nil { - Logger.Errorf("genesis file not found: %s", err) - os.Exit(1) - } - dot.CreateJSONRawFile(bs, GenesisThreeAuths) -} - -// GenerateGenesisSixAuth generates Genesis file with six authority. -func GenerateGenesisSixAuth(t *testing.T) { - bs, err := dot.BuildFromGenesis(utils.GetGssmrGenesisPathTest(t), 6) - if err != nil { - Logger.Errorf("genesis file not found: %s", err) - os.Exit(1) - } - dot.CreateJSONRawFile(bs, GenesisSixAuths) -} - -func generateDefaultConfig() *ctoml.Config { - return &ctoml.Config{ - Global: ctoml.GlobalConfig{ - Name: "Gossamer", - ID: "gssmr", - LogLvl: "crit", - MetricsAddress: "localhost:9876", - RetainBlocks: 256, - Pruning: "archive", - }, - Log: ctoml.LogConfig{ - CoreLvl: "info", - SyncLvl: "info", - }, - Init: ctoml.InitConfig{ - Genesis: "./chain/gssmr/genesis.json", - }, - Account: ctoml.AccountConfig{ - Key: "", - Unlock: "", - }, - Core: ctoml.CoreConfig{ - Roles: 4, - BabeAuthority: true, - GrandpaAuthority: true, - GrandpaInterval: 1, - }, - Network: ctoml.NetworkConfig{ - Bootnodes: nil, - ProtocolID: "/gossamer/gssmr/0", - NoBootstrap: false, - NoMDNS: false, - MinPeers: 1, - MaxPeers: 3, - }, - RPC: ctoml.RPCConfig{ - Enabled: false, - Unsafe: true, - WSUnsafe: true, - Host: "localhost", - Modules: []string{"system", "author", "chain", "state"}, - WS: false, - }, - } -} - -// CreateDefaultConfig generates and creates default config file. -func CreateDefaultConfig() { - cfg := generateDefaultConfig() - dot.ExportTomlConfig(cfg, ConfigDefault) -} - -func generateConfigLogGrandpa() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Log = ctoml.LogConfig{ - CoreLvl: "crit", - NetworkLvl: "debug", - RuntimeLvl: "crit", - BlockProducerLvl: "info", - FinalityGadgetLvl: "debug", - } - return cfg -} - -// CreateConfigLogGrandpa generates and creates grandpa config file. -func CreateConfigLogGrandpa() { - cfg := generateConfigLogGrandpa() - dot.ExportTomlConfig(cfg, ConfigLogGrandpa) -} - -func generateConfigNoBabe() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Global.LogLvl = "info" - cfg.Log = ctoml.LogConfig{ - SyncLvl: "debug", - NetworkLvl: "debug", - } - - cfg.Core.BabeAuthority = false - return cfg -} - -// CreateConfigNoBabe generates and creates no babe config file. -func CreateConfigNoBabe() { - cfg := generateConfigNoBabe() - dot.ExportTomlConfig(cfg, ConfigNoBABE) -} - -func generateConfigNoGrandpa() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Core.GrandpaAuthority = false - cfg.Core.BABELead = true - cfg.Core.GrandpaInterval = 1 - return cfg -} - -// CreateConfigNoGrandpa generates and creates no grandpa config file. 
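
Note: each file-based config generator removed in this hunk has an
in-memory equivalent in the new layout, where every test derives its own
TOML configuration. As a hedged sketch (config.Default() comes from
tests/utils/config and the field names carry over from ctoml.Config),
the no-GRANDPA setup becomes:

    tomlConfig := config.Default()
    tomlConfig.Core.GrandpaAuthority = false
    tomlConfig.Core.BABELead = true
    nodes := node.MakeNodes(t, 3, tomlConfig)
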
-func CreateConfigNoGrandpa() { - cfg := generateConfigNoGrandpa() - dot.ExportTomlConfig(cfg, ConfigNoGrandpa) -} - -func generateConfigNotAuthority() *ctoml.Config { - cfg := generateDefaultConfig() - cfg.Core.Roles = 1 - cfg.Core.BabeAuthority = false - cfg.Core.GrandpaAuthority = false - return cfg -} - -// CreateConfigNotAuthority generates and creates non-authority config file. -func CreateConfigNotAuthority() { - cfg := generateConfigNotAuthority() - dot.ExportTomlConfig(cfg, ConfigNotAuthority) + return genesisPath } diff --git a/tests/utils/header.go b/tests/utils/header.go deleted file mode 100644 index a3eccf0b77..0000000000 --- a/tests/utils/header.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/rpc/modules" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/require" -) - -// headerResponseToHeader converts a *ChainBlockHeaderResponse to a *types.Header -func headerResponseToHeader(t *testing.T, header *modules.ChainBlockHeaderResponse) *types.Header { - parentHash, err := common.HexToHash(header.ParentHash) - require.NoError(t, err) - - nb, err := common.HexToBytes(header.Number) - require.NoError(t, err) - number := common.BytesToUint(nb) - - stateRoot, err := common.HexToHash(header.StateRoot) - require.NoError(t, err) - - extrinsicsRoot, err := common.HexToHash(header.ExtrinsicsRoot) - require.NoError(t, err) - - h, err := types.NewHeader(parentHash, stateRoot, extrinsicsRoot, number, rpcLogsToDigest(t, header.Digest.Logs)) - require.NoError(t, err) - return h -} diff --git a/tests/utils/node/errors.go b/tests/utils/node/errors.go new file mode 100644 index 0000000000..c55bd48b01 --- /dev/null +++ b/tests/utils/node/errors.go @@ -0,0 +1,156 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "context" + "errors" + "fmt" + "sync" + "time" +) + +// errorsFanIn takes care of fanning runtime errors from +// different error channels to a single error channel. +// It also handles removal of specific runtime error channels +// from the fan in, which can be useful if one node crashes +// or is stopped on purpose. +type errorsFanIn struct { + nodeToRuntimeError map[string]<-chan error + nodeToFaninCancel map[string]context.CancelFunc + nodeToFaninDone map[string]<-chan struct{} + fifo chan nodeError + mutex sync.RWMutex +} + +type nodeError struct { + node string + err error +} + +// newErrorsFanIn returns a new errors fan in object. +func newErrorsFanIn() *errorsFanIn { + return &errorsFanIn{ + nodeToRuntimeError: make(map[string]<-chan error), + nodeToFaninCancel: make(map[string]context.CancelFunc), + nodeToFaninDone: make(map[string]<-chan struct{}), + fifo: make(chan nodeError), + } +} + +// Add adds a runtime error receiving channel to the fan in mechanism +// for the particular node string given. Note each node string must be +// unique or the code will panic. 
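+//
+// In practice the fan-in is driven as in this hedged sketch, mirroring
+// how Nodes.InitAndStartTest and stopNodes use it later in this patch:
+//
+//	fanIn := newErrorsFanIn()
+//	for _, node := range nodes {
+//		runtimeError, err := node.Start(ctx)
+//		if err == nil {
+//			fanIn.Add(node.String(), runtimeError)
+//		}
+//	}
+//	err := fanIn.watch(ctx)                // blocks until any node errors
+//	_ = fanIn.waitForAll(10 * time.Second) // on cleanup, drain the rest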
+func (e *errorsFanIn) Add(node string, runtimeError <-chan error) {
+	e.mutex.Lock()
+	defer e.mutex.Unlock()
+
+	// check for duplicate node string
+	_, exists := e.nodeToRuntimeError[node]
+	if exists {
+		panic(fmt.Sprintf("node %q was already added", node))
+	}
+
+	e.nodeToRuntimeError[node] = runtimeError
+	ctx, cancel := context.WithCancel(context.Background())
+	e.nodeToFaninCancel[node] = cancel
+	fanInDone := make(chan struct{})
+	e.nodeToFaninDone[node] = fanInDone
+
+	go fanIn(ctx, node, runtimeError, e.fifo, fanInDone)
+}
+
+func fanIn(ctx context.Context, node string,
+	runtimeError <-chan error, fifo chan<- nodeError,
+	fanInDone chan<- struct{}) {
+	defer close(fanInDone)
+
+	select {
+	case <-ctx.Done():
+		return
+	case err := <-runtimeError:
+		fifo <- nodeError{
+			node: node,
+			err:  err,
+		}
+	}
+}
+
+// len returns how many nodes are being monitored
+// for runtime errors.
+func (e *errorsFanIn) len() (length int) {
+	e.mutex.RLock()
+	defer e.mutex.RUnlock()
+
+	return len(e.nodeToRuntimeError)
+}
+
+// remove removes a node from the fan in mechanism
+// and clears it from the internal maps.
+func (e *errorsFanIn) remove(node string) {
+	e.mutex.Lock()
+	defer e.mutex.Unlock()
+
+	e.removeWithoutLock(node)
+}
+
+func (e *errorsFanIn) removeWithoutLock(node string) {
+	// Stop fanning in
+	cancelFanIn := e.nodeToFaninCancel[node]
+	fanInDone := e.nodeToFaninDone[node]
+	cancelFanIn()
+	<-fanInDone
+
+	// Clear from maps
+	delete(e.nodeToRuntimeError, node)
+	delete(e.nodeToFaninCancel, node)
+	delete(e.nodeToFaninDone, node)
+}
+
+var (
+	ErrWaitTimedOut = errors.New("waiting for all nodes timed out")
+)
+
+// waitForAll waits to collect the runtime errors from all the added
+// nodes which did not crash previously.
+// If the timeout duration specified is reached, all internal
+// fan in operations are stopped, all the nodes are cleared from
+// the internal maps, and an error is returned.
+func (e *errorsFanIn) waitForAll(timeout time.Duration) (err error) {
+	e.mutex.Lock()
+	defer e.mutex.Unlock()
+
+	timer := time.NewTimer(timeout)
+
+	length := len(e.nodeToRuntimeError)
+	for i := 0; i < length; i++ {
+		select {
+		case <-timer.C:
+			for node := range e.nodeToRuntimeError {
+				e.removeWithoutLock(node)
+			}
+			return fmt.Errorf("%w: for %d nodes after %s",
+				ErrWaitTimedOut, length-i, timeout)
+		case identifiedError := <-e.fifo: // one error per node max
+			node := identifiedError.node
+			e.removeWithoutLock(node)
+		}
+	}
+
+	_ = timer.Stop()
+
+	return nil
+}
+
+// watch returns the next runtime error from the N runtime
+// error channels, in a first in first out mechanism.
+func (e *errorsFanIn) watch(ctx context.Context) (err error) { + select { + case <-ctx.Done(): + return ctx.Err() + case identifiedErr := <-e.fifo: // single fatal error + e.remove(identifiedErr.node) + return identifiedErr.err + } +} diff --git a/tests/utils/node/node.go b/tests/utils/node/node.go new file mode 100644 index 0000000000..8bb9d652bc --- /dev/null +++ b/tests/utils/node/node.go @@ -0,0 +1,289 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "testing" + + "github.com/ChainSafe/gossamer/dot/config/toml" + "github.com/ChainSafe/gossamer/lib/utils" + "github.com/ChainSafe/gossamer/tests/utils/config" + "github.com/ChainSafe/gossamer/tests/utils/pathfinder" + "github.com/stretchr/testify/require" +) + +// Node is a structure holding all the settings to +// configure a Gossamer node. +type Node struct { + index *int + configPath string + tomlConfig toml.Config + writer io.Writer + logsBuffer *bytes.Buffer + binPath string +} + +// New returns a node configured using the +// toml configuration and options given. +func New(t *testing.T, tomlConfig toml.Config, + options ...Option) (node Node) { + node.tomlConfig = tomlConfig + for _, option := range options { + option(&node) + } + node.setDefaults(t) + node.setWriterPrefix() + node.configPath = config.Write(t, node.tomlConfig) + return node +} + +func (n Node) String() string { + indexString := fmt.Sprint(*n.index) + return fmt.Sprintf("%s-%s", n.tomlConfig.Account.Key, indexString) +} + +// RPCPort returns the rpc port of the node. +func (n Node) RPCPort() (port string) { return fmt.Sprint(n.tomlConfig.RPC.Port) } + +// WSPort returns the websocket port of the node. +func (n Node) WSPort() (port string) { return fmt.Sprint(n.tomlConfig.RPC.WSPort) } + +// Key returns the key of the node. +func (n Node) Key() (key string) { return n.tomlConfig.Account.Key } + +func intPtr(n int) *int { return &n } + +func (n *Node) setDefaults(t *testing.T) { + if n.index == nil { + n.index = intPtr(0) + } + + if n.tomlConfig.Global.BasePath == "" { + n.tomlConfig.Global.BasePath = t.TempDir() + } + + if n.tomlConfig.Init.Genesis == "" { + n.tomlConfig.Init.Genesis = utils.GetGssmrGenesisRawPathTest(t) + } + + if n.tomlConfig.Account.Key == "" { + keyList := []string{"alice", "bob", "charlie", "dave", "eve", "ferdie", "george", "heather", "ian"} + if *n.index < len(keyList) { + n.tomlConfig.Account.Key = keyList[*n.index] + } else { + n.tomlConfig.Account.Key = "default-key" + } + } + + if n.tomlConfig.Network.Port == 0 { + const basePort uint16 = 7000 + n.tomlConfig.Network.Port = basePort + uint16(*n.index) + } + + if n.tomlConfig.RPC.Enabled && n.tomlConfig.RPC.Port == 0 { + const basePort uint32 = 8540 + n.tomlConfig.RPC.Port = basePort + uint32(*n.index) + } + + if n.tomlConfig.RPC.WS && n.tomlConfig.RPC.WSPort == 0 { + const basePort uint32 = 8546 + n.tomlConfig.RPC.WSPort = basePort + uint32(*n.index) + } + + userSetWriter := n.writer != nil && n.writer != io.Discard + if !userSetWriter { + n.logsBuffer = bytes.NewBuffer(nil) + } + + if n.writer == nil { + n.writer = io.Discard + } + + if n.binPath == "" { + n.binPath = pathfinder.GetGossamer(t) + } +} + +// Init initialises the Gossamer node. 
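+//
+// A typical flow (hedged sketch; SetIndex and SetWriter are the options
+// from options.go further below, NewTestWriter is from tests/utils)
+// constructs the node first and then initialises it:
+//
+//	n := node.New(t, config.Default(),
+//		node.SetIndex(1), // key "bob", network port 7001
+//		node.SetWriter(utils.NewTestWriter(t)),
+//	)
+//	err := n.Init(ctx)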
+func (n *Node) Init(ctx context.Context) (err error) {
+	cmdInit := exec.CommandContext(ctx, n.binPath, "init", //nolint:gosec
+		"--config", n.configPath,
+	)
+
+	if n.logsBuffer != nil {
+		n.logsBuffer.Reset()
+		n.writer = io.MultiWriter(n.writer, n.logsBuffer)
+	}
+
+	cmdInit.Stdout = n.writer
+	cmdInit.Stderr = n.writer
+
+	err = cmdInit.Start()
+	if err != nil {
+		return fmt.Errorf("cannot start command: %w", err)
+	}
+
+	err = cmdInit.Wait()
+	return n.wrapRuntimeError(ctx, cmdInit, err)
+}
+
+// Start starts a Gossamer node using the node configuration of
+// the receiving struct. It returns a start error if the node cannot
+// be started, and runs the node until the context gets canceled.
+// When the node crashes or is stopped, an error (nil or not) is sent
+// in the returned runtime error channel.
+func (n *Node) Start(ctx context.Context) (runtimeError <-chan error, startErr error) {
+	cmd := exec.CommandContext(ctx, n.binPath, //nolint:gosec
+		"--config", n.configPath,
+		"--no-telemetry")
+
+	if n.logsBuffer != nil {
+		n.logsBuffer.Reset()
+		n.writer = io.MultiWriter(n.writer, n.logsBuffer)
+	}
+
+	cmd.Stdout = n.writer
+	cmd.Stderr = cmd.Stdout // we assume no race between stdout and stderr
+
+	err := cmd.Start()
+	if err != nil {
+		return nil, fmt.Errorf("cannot start %s: %w", cmd, err)
+	}
+
+	waitErrCh := make(chan error)
+	go func(cmd *exec.Cmd, node *Node, waitErr chan<- error) {
+		err = cmd.Wait()
+		waitErr <- node.wrapRuntimeError(ctx, cmd, err)
+	}(cmd, n, waitErrCh)
+
+	return waitErrCh, nil
+}
+
+// StartAndWait starts a Gossamer node using the node configuration of
+// the receiving struct. It returns a start error if the node cannot
+// be started, and runs the node until the context gets canceled.
+// When the node crashes or is stopped, an error (nil or not) is sent
+// in the returned runtime error channel.
+// It waits for the node to respond to an RPC health call before returning.
+func (n *Node) StartAndWait(ctx context.Context) (
+	runtimeError <-chan error, startErr error) {
+	runtimeError, startErr = n.Start(ctx)
+	if startErr != nil {
+		return nil, startErr
+	}
+
+	err := waitForNode(ctx, n.RPCPort())
+	if err != nil {
+		return nil, fmt.Errorf("failed waiting: %w", err)
+	}
+
+	return runtimeError, nil
+}
+
+// InitAndStartTest is a test helper method to initialise and start the node,
+// as well as registering appropriate test handlers.
+// If initialising or starting fails, cleanup is done and the test fails instantly.
+// If the node crashes during runtime, the passed `signalTestToStop` argument is
+// called since the test cannot be failed from outside the main test goroutine.
+func (n Node) InitAndStartTest(ctx context.Context, t *testing.T,
+	signalTestToStop context.CancelFunc) {
+	t.Helper()
+
+	err := n.Init(ctx)
+	require.NoError(t, err)
+
+	nodeCtx, nodeCancel := context.WithCancel(ctx)
+
+	waitErr, err := n.StartAndWait(nodeCtx)
+	if err != nil {
+		t.Errorf("failed to start node %s: %s", n, err)
+		// Release resources and fail the test
+		nodeCancel()
+		t.FailNow()
+	}
+
+	t.Logf("Node %s is ready", n)
+
+	// watch for runtime fatal node error
+	watchDogCtx, watchDogCancel := context.WithCancel(ctx)
+	watchDogDone := make(chan struct{})
+	go func() {
+		defer close(watchDogDone)
+		select {
+		case <-watchDogCtx.Done():
+			return
+		case err := <-waitErr: // the node crashed
+			if watchDogCtx.Err() != nil {
+				// make sure the runtime watchdog is not meant
+				// to be disengaged, in case of signal racing.
+ return + } + t.Errorf("node %s crashed: %s", n, err) + // Release resources + nodeCancel() + // we cannot stop the test with t.FailNow() from a goroutine + // other than the test goroutine, so we call the following function + // to signal the test goroutine to stop the test. + signalTestToStop() + } + }() + + t.Cleanup(func() { + t.Helper() + // Disengage node watchdog goroutine + watchDogCancel() + <-watchDogDone + // Stop the node and wait for it to exit + nodeCancel() + <-waitErr + t.Logf("Node %s terminated", n) + }) +} + +func (n *Node) setWriterPrefix() { + if n.writer == io.Discard { + return // no need to wrap it + } + + n.writer = &prefixedWriter{ + prefix: []byte(n.String() + " "), + writer: n.writer, + } +} + +// wrapRuntimeError wraps the error given using the context available +// such as the command string or the log buffer. It returns nil if the +// argument error is nil. +func (n *Node) wrapRuntimeError(ctx context.Context, cmd *exec.Cmd, + waitErr error) (wrappedErr error) { + if waitErr == nil { + return nil + } + + if ctx.Err() != nil { + return fmt.Errorf("%s: %w: %s", n, ctx.Err(), waitErr) + } + + var logInformation string + if n.logsBuffer != nil { + // Add log information to error if no writer is set + // for this node. + logInformation = "\nLogs:\n" + n.logsBuffer.String() + } + + configData, configReadErr := os.ReadFile(n.configPath) + configString := string(configData) + if configReadErr != nil { + configString = configReadErr.Error() + } + + return fmt.Errorf("%s encountered a runtime error: %w\ncommand: %s\n\n%s\n\n%s", + n, waitErr, cmd, configString, logInformation) +} diff --git a/tests/utils/node/node_test.go b/tests/utils/node/node_test.go new file mode 100644 index 0000000000..42cc20a514 --- /dev/null +++ b/tests/utils/node/node_test.go @@ -0,0 +1,28 @@ +//go:build endtoend + +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "context" + "testing" + "time" + + "github.com/ChainSafe/gossamer/tests/utils/config" +) + +func Test_Node_InitAndStartTest(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + t.Cleanup(cancel) + + tomlConfig := config.Default() + tomlConfig.Core.BABELead = true + + n := New(t, tomlConfig) + + n.InitAndStartTest(ctx, t, cancel) + + cancel() +} diff --git a/tests/utils/node/nodes.go b/tests/utils/node/nodes.go new file mode 100644 index 0000000000..04515cef03 --- /dev/null +++ b/tests/utils/node/nodes.go @@ -0,0 +1,176 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/config/toml" +) + +// Nodes is a slice of nodes. +type Nodes []Node + +// MakeNodes creates `num` nodes using the `tomlConfig` +// as a base config for each node. It overrides some of configuration: +// - the first node is always the BABE lead (overrides the toml configuration) +// - the index of each node is incremented per node (overrides the SetIndex option, if set) +func MakeNodes(t *testing.T, num int, tomlConfig toml.Config, + options ...Option) (nodes Nodes) { + nodes = make(Nodes, num) + for i := range nodes { + options = append(options, SetIndex(i)) + tomlConfig.Core.BABELead = i == 0 + nodes[i] = New(t, tomlConfig, options...) + } + return nodes +} + +// Init initialises all nodes and returns an error if any +// init operation failed. 
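+//
+// A multi-node test would typically (hedged sketch) be set up as:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	nodes := node.MakeNodes(t, 3, config.Default())
+//	nodes.InitAndStartTest(ctx, t, cancel)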
+func (nodes Nodes) Init(ctx context.Context) (err error) {
+	initErrors := make(chan error)
+	for _, node := range nodes {
+		go func(node Node) {
+			err := node.Init(ctx) // takes 2 seconds
+			if err != nil {
+				err = fmt.Errorf("node %s failed to initialise: %w", node, err)
+			}
+			initErrors <- err
+		}(node)
+	}
+
+	for range nodes {
+		initErr := <-initErrors
+		if err == nil && initErr != nil {
+			err = initErr
+		}
+	}
+
+	return err
+}
+
+// Start starts all the nodes and returns a slice of runtime error
+// channels, one channel per started node, together with an eventual
+// start error. The caller should receive on each returned channel to
+// collect the node's runtime error. All the nodes are stopped when
+// the context is canceled, and their errors are sent on the channels.
+func (nodes Nodes) Start(ctx context.Context) (
+	runtimeErrors []<-chan error, startErr error) {
+	runtimeErrors = make([]<-chan error, 0, len(nodes))
+	for _, node := range nodes {
+		runtimeError, err := node.Start(ctx)
+		if err != nil {
+			return runtimeErrors, fmt.Errorf("node with index %d: %w",
+				*node.index, err)
+		}
+
+		runtimeErrors = append(runtimeErrors, runtimeError)
+	}
+
+	for _, node := range nodes {
+		port := node.RPCPort()
+		err := waitForNode(ctx, port)
+		if err != nil {
+			return runtimeErrors, fmt.Errorf("node with index %d: %w", *node.index, err)
+		}
+	}
+
+	return runtimeErrors, nil
+}
+
+// InitAndStartTest is a test helper method to initialise and start nodes,
+// as well as registering appropriate test handlers.
+// If any node fails to initialise or start, cleanup is done and the test
+// is instantly failed.
+// If any node crashes at runtime, all other nodes are shut down,
+// cleanup is done and the passed argument `signalTestToStop`
+// is called to signal to the main test goroutine to stop.
+func (nodes Nodes) InitAndStartTest(ctx context.Context, t *testing.T,
+	signalTestToStop context.CancelFunc) {
+	t.Helper()
+
+	err := nodes.Init(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nodesCtx, nodesCancel := context.WithCancel(ctx)
+	runtimeErrors := newErrorsFanIn()
+
+	for _, node := range nodes {
+		runtimeError, err := node.Start(nodesCtx) // takes little time
+		if err == nil {
+			runtimeErrors.Add(node.String(), runtimeError)
+			continue
+		}
+
+		t.Errorf("Node %s failed to start: %s", node, err)
+
+		stopNodes(t, nodesCancel, runtimeErrors)
+		t.FailNow()
+	}
+
+	// This runs sequentially since all nodes start at almost the same
+	// time, so waiting for one node also waits for all the others;
+	// the test logs show that all the nodes become ready
+	// at about the same time.
+	for _, node := range nodes {
+		err := waitForNode(ctx, node.RPCPort())
+		if err == nil {
+			t.Logf("Node %s is ready", node)
+			continue
+		}
+
+		t.Errorf("Node %s failed to be ready: %s", node, err)
+		stopNodes(t, nodesCancel, runtimeErrors)
+		t.FailNow()
+	}
+
+	// watch for runtime fatal error from any of the nodes
+	watchDogCtx, watchDogCancel := context.WithCancel(ctx)
+	watchDogDone := make(chan struct{})
+	go func() {
+		defer close(watchDogDone)
+		err := runtimeErrors.watch(watchDogCtx)
+		watchDogWasStopped := errors.Is(err, context.Canceled) ||
+			errors.Is(err, context.DeadlineExceeded)
+		if watchDogWasStopped {
+			return
+		}
+
+		t.Errorf("one node has crashed: %s", err)
+		// we cannot stop the test with t.FailNow() from a goroutine
+		// other than the test goroutine, so we call signalTestToStop
+		// to signal the test goroutine to stop the test.
+ signalTestToStop() + }() + + t.Cleanup(func() { + t.Helper() + // Disengage node watchdog goroutine + watchDogCancel() + <-watchDogDone + // Stop and wait for nodes to exit + stopNodes(t, nodesCancel, runtimeErrors) + }) +} + +func stopNodes(t *testing.T, nodesCancel context.CancelFunc, + runtimeErrors *errorsFanIn) { + t.Helper() + + // Stop the nodes and wait for them to exit + nodesCancel() + t.Logf("waiting on %d nodes to terminate...", runtimeErrors.len()) + const waitTimeout = 10 * time.Second + err := runtimeErrors.waitForAll(waitTimeout) + if err != nil { + t.Logf("WARNING: %s", err) + } +} diff --git a/tests/utils/node/options.go b/tests/utils/node/options.go new file mode 100644 index 0000000000..0e7923b37f --- /dev/null +++ b/tests/utils/node/options.go @@ -0,0 +1,23 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import "io" + +// Option is an option to use with the `New` constructor. +type Option func(node *Node) + +// SetIndex sets the index for the node. +func SetIndex(index int) Option { + return func(node *Node) { + node.index = intPtr(index) + } +} + +// SetWriter sets the writer for the node. +func SetWriter(writer io.Writer) Option { + return func(node *Node) { + node.writer = writer + } +} diff --git a/tests/utils/node/waitnode.go b/tests/utils/node/waitnode.go new file mode 100644 index 0000000000..41f0411d51 --- /dev/null +++ b/tests/utils/node/waitnode.go @@ -0,0 +1,47 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/ChainSafe/gossamer/tests/utils/retry" + "github.com/ChainSafe/gossamer/tests/utils/rpc" +) + +func waitForNode(ctx context.Context, rpcPort string) (err error) { + const retryWait = time.Second + err = retry.UntilNoError(ctx, retryWait, func() (err error) { + const checkNodeStartedTimeout = time.Second + checkNodeCtx, checkNodeCancel := context.WithTimeout(ctx, checkNodeStartedTimeout) + err = checkNodeStarted(checkNodeCtx, "http://localhost:"+rpcPort) + checkNodeCancel() + return err + }) + + if err != nil { + return fmt.Errorf("node did not start: %w", err) + } + + return nil +} + +var errNodeNotExpectingPeers = errors.New("node should expect to have peers") + +// checkNodeStarted check if gossamer node is started +func checkNodeStarted(ctx context.Context, gossamerHost string) error { + health, err := rpc.GetHealth(ctx, gossamerHost) + if err != nil { + return fmt.Errorf("cannot get health: %w", err) + } + + if !health.ShouldHavePeers { + return errNodeNotExpectingPeers + } + + return nil +} diff --git a/tests/utils/node/writer.go b/tests/utils/node/writer.go new file mode 100644 index 0000000000..7dd0228161 --- /dev/null +++ b/tests/utils/node/writer.go @@ -0,0 +1,28 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "io" +) + +type prefixedWriter struct { + prefix []byte + writer io.Writer +} + +func (w *prefixedWriter) Write(p []byte) (n int, err error) { + toWrite := make([]byte, 0, len(w.prefix)+len(p)) + toWrite = append(toWrite, w.prefix...) + toWrite = append(toWrite, p...) 
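+	// The io.Writer contract requires Write to report how many bytes of p
+	// were consumed, not how many bytes were written downstream, which is
+	// why n is adjusted after the write below.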
+ n, err = w.writer.Write(toWrite) + + // n has to match the length of p + n -= len(w.prefix) + if n < 0 { + n = 0 + } + + return n, err +} diff --git a/tests/utils/node/writer_test.go b/tests/utils/node/writer_test.go new file mode 100644 index 0000000000..c53487d2cb --- /dev/null +++ b/tests/utils/node/writer_test.go @@ -0,0 +1,38 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_prefixedWriter(t *testing.T) { + t.Parallel() + + writer := bytes.NewBuffer(nil) + prefixWriter := &prefixedWriter{ + prefix: []byte("prefix: "), + writer: writer, + } + + message := []byte("message\n") + n, err := prefixWriter.Write(message) + require.NoError(t, err) + expectedBytesWrittenCount := 8 + assert.Equal(t, expectedBytesWrittenCount, n) + expectedWritten := "prefix: message\n" + assert.Equal(t, expectedWritten, writer.String()) + + message = []byte("message two\n") + n, err = prefixWriter.Write(message) + require.NoError(t, err) + expectedBytesWrittenCount = 12 + assert.Equal(t, expectedBytesWrittenCount, n) + expectedWritten = "prefix: message\nprefix: message two\n" + assert.Equal(t, expectedWritten, writer.String()) +} diff --git a/tests/utils/pathfinder/gossamer.go b/tests/utils/pathfinder/gossamer.go new file mode 100644 index 0000000000..7164940dcd --- /dev/null +++ b/tests/utils/pathfinder/gossamer.go @@ -0,0 +1,22 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package pathfinder + +import ( + "path/filepath" + "testing" + + "github.com/ChainSafe/gossamer/lib/utils" + "github.com/stretchr/testify/require" +) + +// GetGossamer returns the path to the Gossamer binary +// as /bin/gossamer. +func GetGossamer(t *testing.T) (binPath string) { + t.Helper() + + projectRootPath, err := utils.GetProjectRootPath() + require.NoError(t, err, "cannot get project root path") + return filepath.Join(projectRootPath, "bin/gossamer") +} diff --git a/tests/utils/request_utils.go b/tests/utils/request_utils.go deleted file mode 100644 index 4e07bbab80..0000000000 --- a/tests/utils/request_utils.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/pkg/scale" - - "github.com/stretchr/testify/require" -) - -// PostRPC sends a payload using the method, host and params string given. -// It returns the response bytes and an eventual error. 
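
Note: the helper below is removed in favour of rpc.Post from the new
tests/utils/rpc package, so call sites migrate mechanically, as the
framework.go hunk earlier in this patch already shows:

    // before
    respBody, err := PostRPC(ctx, NewEndpoint(node.RPCPort), method, params)
    // after
    respBody, err := rpc.Post(ctx, rpc.NewEndpoint(node.RPCPort()), method, params)
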
-func PostRPC(ctx context.Context, endpoint, method, params string) (data []byte, err error) { - requestBody := fmt.Sprintf(`{"jsonrpc":"2.0","method":"%s","params":%s,"id":1}`, method, params) - requestBuffer := bytes.NewBuffer([]byte(requestBody)) - - request, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, requestBuffer) - if err != nil { - return nil, fmt.Errorf("cannot create HTTP request: %w", err) - } - - const contentType = "application/json" - request.Header.Set("Content-Type", contentType) - request.Header.Set("Accept", contentType) - - response, err := http.DefaultClient.Do(request) - if err != nil { - return nil, fmt.Errorf("cannot do HTTP request: %w", err) - } - - data, err = io.ReadAll(response.Body) - if err != nil { - _ = response.Body.Close() - return nil, fmt.Errorf("cannot read HTTP response body: %w", err) - } - - err = response.Body.Close() - if err != nil { - return nil, fmt.Errorf("cannot close HTTP response body: %w", err) - } - - return data, nil -} - -// PostRPCWithRetry repeatitively calls `PostRPC` repeatitively -// until it succeeds within the requestWait duration or returns -// the last error if the context is canceled. -func PostRPCWithRetry(ctx context.Context, endpoint, method, params string, - requestWait time.Duration) (data []byte, err error) { - try := 0 - for { - try++ - - postRPCCtx, postRPCCancel := context.WithTimeout(ctx, requestWait) - - data, err = PostRPC(postRPCCtx, endpoint, method, params) - - if err == nil { - postRPCCancel() - return data, nil - } - - // wait for full requestWait duration or main context cancelation - <-postRPCCtx.Done() - postRPCCancel() - - if ctx.Err() != nil { - break - } - } - - totalTime := time.Duration(try) * requestWait - tryWord := "try" - if try > 1 { - tryWord = "tries" - } - return nil, fmt.Errorf("after %d %s totalling %s: %w", try, tryWord, totalTime, err) -} - -// DecodeRPC will decode []body into target interface -func DecodeRPC(t *testing.T, body []byte, target interface{}) error { - decoder := json.NewDecoder(bytes.NewReader(body)) - decoder.DisallowUnknownFields() - - var response ServerResponse - err := decoder.Decode(&response) - require.Nil(t, err, string(body)) - require.Equal(t, response.Version, "2.0") - - if response.Error != nil { - return errors.New(response.Error.Message) - } - - decoder = json.NewDecoder(bytes.NewReader(response.Result)) - decoder.DisallowUnknownFields() - - err = decoder.Decode(target) - require.Nil(t, err, string(body)) - return nil -} - -// DecodeWebsocket will decode body into target interface -func DecodeWebsocket(t *testing.T, body []byte, target interface{}) error { - decoder := json.NewDecoder(bytes.NewReader(body)) - decoder.DisallowUnknownFields() - - var response WebsocketResponse - err := decoder.Decode(&response) - require.Nil(t, err, string(body)) - require.Equal(t, response.Version, "2.0") - - if response.Error != nil { - return errors.New(response.Error.Message) - } - - if response.Result != nil { - decoder = json.NewDecoder(bytes.NewReader(response.Result)) - } else { - decoder = json.NewDecoder(bytes.NewReader(response.Params)) - } - - decoder.DisallowUnknownFields() - - err = decoder.Decode(target) - require.Nil(t, err, string(body)) - return nil -} - -// DecodeRPC_NT will decode []body into target interface (NT is Not Test testing required) -func DecodeRPC_NT(body []byte, target interface{}) error { //nolint:revive - decoder := json.NewDecoder(bytes.NewReader(body)) - decoder.DisallowUnknownFields() - - var response ServerResponse - err 
:= decoder.Decode(&response) - if err != nil { - return err - } - - if response.Error != nil { - return errors.New(response.Error.Message) - } - - decoder = json.NewDecoder(bytes.NewReader(response.Result)) - decoder.DisallowUnknownFields() - - err = decoder.Decode(target) - return err -} - -// NewEndpoint will create a new endpoint string based on utils.HOSTNAME and port -func NewEndpoint(port string) string { - return "http://" + HOSTNAME + ":" + port -} - -func rpcLogsToDigest(t *testing.T, logs []string) scale.VaryingDataTypeSlice { - digest := types.NewDigest() - - for _, l := range logs { - itemBytes, err := common.HexToBytes(l) - require.NoError(t, err) - - var di = types.NewDigestItem() - err = scale.Unmarshal(itemBytes, &di) - require.NoError(t, err) - - err = digest.Add(di.Value()) - require.NoError(t, err) - } - - return digest -} diff --git a/tests/utils/retry/common.go b/tests/utils/retry/common.go new file mode 100644 index 0000000000..5562614ba9 --- /dev/null +++ b/tests/utils/retry/common.go @@ -0,0 +1,28 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package retry + +import ( + "context" + "fmt" + "time" +) + +func waitAfterFail(ctx context.Context, retryWait time.Duration, + failedTries *int) { + *failedTries++ + waitCtx, waitCancel := context.WithTimeout(ctx, retryWait) + <-waitCtx.Done() + waitCancel() +} + +func makeError(failedTries int, retryWait time.Duration, ctxErr error) (err error) { + totalRetryTime := time.Duration(failedTries) * retryWait + tryWord := "try" + if failedTries > 1 { + tryWord = "tries" + } + return fmt.Errorf("failed after %d %s during %s (%w)", + failedTries, tryWord, totalRetryTime, ctxErr) +} diff --git a/tests/utils/retry/untilnoerror.go b/tests/utils/retry/untilnoerror.go new file mode 100644 index 0000000000..c176344544 --- /dev/null +++ b/tests/utils/retry/untilnoerror.go @@ -0,0 +1,29 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package retry + +import ( + "context" + "time" +) + +// UntilNoError retries the function `f` until it returns a nil error. +// It waits `retryWait` after each failed call to `f`. +// If the context `ctx` is canceled, the function returns +// immediately an error stating the number of failed tries, +// for how long it retried and the last error returned by `f`. +func UntilNoError(ctx context.Context, retryWait time.Duration, + f func() (err error)) (err error) { + failedTries := 0 + for ctx.Err() == nil { + err = f() + if err == nil { + return nil + } + + waitAfterFail(ctx, retryWait, &failedTries) + } + + return makeError(failedTries, retryWait, ctx.Err()) +} diff --git a/tests/utils/retry/untilok.go b/tests/utils/retry/untilok.go new file mode 100644 index 0000000000..9b85d89248 --- /dev/null +++ b/tests/utils/retry/untilok.go @@ -0,0 +1,33 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package retry + +import ( + "context" + "fmt" + "time" +) + +// UntilOK retries the function `f` until it returns a true +// value for `ok` or a non nil error. +// It waits `retryWait` after each failed call to `f`. +// If the context `ctx` is canceled, the function returns +// immediately an error stating the number of failed tries, +// for how long it retried and the context error. 
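+//
+// For example, a hedged sketch polling the chain head using GetChainHead
+// from tests/utils/rpc (added later in this patch; the Number field name
+// is assumed from dot/types):
+//
+//	err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) {
+//		header, err := rpc.GetChainHead(ctx, node.RPCPort())
+//		if err != nil {
+//			return false, err // a non-nil error stops the retrying
+//		}
+//		return header.Number >= 5, nil
+//	})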
+func UntilOK(ctx context.Context, retryWait time.Duration, + f func() (ok bool, err error)) (err error) { + failedTries := 0 + for ctx.Err() == nil { + ok, err := f() + if ok { + return nil + } else if err != nil { + return fmt.Errorf("stop retrying function: %w", err) + } + + waitAfterFail(ctx, retryWait, &failedTries) + } + + return makeError(failedTries, retryWait, ctx.Err()) +} diff --git a/tests/utils/rpc/chain.go b/tests/utils/rpc/chain.go new file mode 100644 index 0000000000..74094ff7cb --- /dev/null +++ b/tests/utils/rpc/chain.go @@ -0,0 +1,131 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "context" + "fmt" + "strconv" + + "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +// GetChainHead calls the endpoint chain_getHeader to get the latest chain head +func GetChainHead(ctx context.Context, rpcPort string) (header *types.Header, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getHeader" + const params = "[]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return nil, fmt.Errorf("cannot post RPC: %w", err) + } + + var rpcHeader modules.ChainBlockHeaderResponse + err = Decode(respBody, &rpcHeader) + if err != nil { + return nil, fmt.Errorf("cannot decode RPC response: %w", err) + } + + header, err = headerResponseToHeader(rpcHeader) + if err != nil { + return nil, fmt.Errorf("malformed block header received: %w", err) + } + + return header, nil +} + +// GetBlockHash calls the endpoint chain_getBlockHash to get the latest chain head. +// It will block until a response is received or the context gets canceled. +func GetBlockHash(ctx context.Context, rpcPort, num string) (hash common.Hash, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getBlockHash" + params := "[" + num + "]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return hash, fmt.Errorf("cannot post RPC: %w", err) + } + + return hexStringBodyToHash(respBody) +} + +// GetFinalizedHead calls the endpoint chain_getFinalizedHead to get the latest finalised head +func GetFinalizedHead(ctx context.Context, rpcPort string) ( + hash common.Hash, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getFinalizedHead" + const params = "[]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return hash, fmt.Errorf("cannot post RPC: %w", err) + } + + return hexStringBodyToHash(respBody) +} + +// GetFinalizedHeadByRound calls the endpoint chain_getFinalizedHeadByRound to get the finalised head at a given round +// TODO: add setID, hard-coded at 1 for now +func GetFinalizedHeadByRound(ctx context.Context, rpcPort string, round uint64) ( + hash common.Hash, err error) { + p := strconv.Itoa(int(round)) + endpoint := NewEndpoint(rpcPort) + const method = "chain_getFinalizedHeadByRound" + params := "[" + p + ",1]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return hash, fmt.Errorf("cannot post RPC: %w", err) + } + + return hexStringBodyToHash(respBody) +} + +// GetBlock calls the endpoint chain_getBlock +func GetBlock(ctx context.Context, rpcPort string, hash common.Hash) ( + block *types.Block, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "chain_getBlock" + params := fmt.Sprintf(`["%s"]`, hash) + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return nil, 
fmt.Errorf("cannot post RPC: %w", err) + } + + rpcBlock := new(modules.ChainBlockResponse) + err = Decode(respBody, rpcBlock) + if err != nil { + return nil, fmt.Errorf("cannot decode RPC response body: %w", err) + } + + rpcHeader := rpcBlock.Block.Header + header, err := headerResponseToHeader(rpcHeader) + if err != nil { + return nil, fmt.Errorf("malformed block header received: %w", err) + } + + body, err := types.NewBodyFromExtrinsicStrings(rpcBlock.Block.Body) + if err != nil { + return nil, fmt.Errorf("cannot create body from RPC block body: %w", err) + } + + return &types.Block{ + Header: *header, + Body: *body, + }, nil +} + +func hexStringBodyToHash(body []byte) (hash common.Hash, err error) { + var hexHashString string + err = Decode(body, &hexHashString) + if err != nil { + return common.Hash{}, fmt.Errorf("cannot decode RPC: %w", err) + } + + hash, err = common.HexToHash(hexHashString) + if err != nil { + return common.Hash{}, fmt.Errorf("malformed block hash hex string: %w", err) + } + + return hash, nil +} diff --git a/tests/utils/rpc/dev.go b/tests/utils/rpc/dev.go new file mode 100644 index 0000000000..4c4e81efae --- /dev/null +++ b/tests/utils/rpc/dev.go @@ -0,0 +1,76 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "context" + "encoding/binary" + "fmt" + "time" + + "github.com/ChainSafe/gossamer/lib/common" +) + +// PauseBABE calls the endpoint dev_control with the params ["babe", "stop"] +func PauseBABE(ctx context.Context, rpcPort string) error { + endpoint := NewEndpoint(rpcPort) + const method = "dev_control" + const params = `["babe", "stop"]` + _, err := Post(ctx, endpoint, method, params) + return err +} + +// SlotDuration Calls dev endpoint for slot duration +func SlotDuration(ctx context.Context, rpcPort string) ( + slotDuration time.Duration, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "dev_slotDuration" + const params = "[]" + data, err := Post(ctx, endpoint, method, params) + if err != nil { + return 0, fmt.Errorf("cannot post RPC: %w", err) + } + + var slotDurationString string + err = Decode(data, &slotDurationString) + if err != nil { + return 0, fmt.Errorf("cannot decode RPC response: %w", err) + } + + b, err := common.HexToBytes(slotDurationString) + if err != nil { + return 0, fmt.Errorf("malformed slot duration hex string: %w", err) + } + + slotDurationUint64 := binary.LittleEndian.Uint64(b) + + slotDuration = time.Millisecond * time.Duration(slotDurationUint64) + + return slotDuration, nil +} + +// EpochLength Calls dev endpoint for epoch length +func EpochLength(ctx context.Context, rpcPort string) (epochLength uint64, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "dev_epochLength" + const params = "[]" + data, err := Post(ctx, endpoint, method, params) + if err != nil { + return 0, fmt.Errorf("cannot post RPC: %w", err) + } + + var epochLengthHexString string + err = Decode(data, &epochLengthHexString) + if err != nil { + return 0, fmt.Errorf("cannot decode RPC response: %w", err) + } + + b, err := common.HexToBytes(epochLengthHexString) + if err != nil { + return 0, fmt.Errorf("malformed epoch length hex string: %w", err) + } + + epochLength = binary.LittleEndian.Uint64(b) + return epochLength, nil +} diff --git a/tests/utils/rpc/header.go b/tests/utils/rpc/header.go new file mode 100644 index 0000000000..63e3184e86 --- /dev/null +++ b/tests/utils/rpc/header.go @@ -0,0 +1,49 @@ +// Copyright 2021 ChainSafe Systems (ON) +// 
SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "fmt" + + "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +// headerResponseToHeader converts a *ChainBlockHeaderResponse to a *types.Header +func headerResponseToHeader(rpcHeader modules.ChainBlockHeaderResponse) (header *types.Header, err error) { + parentHash, err := common.HexToHash(rpcHeader.ParentHash) + if err != nil { + return nil, fmt.Errorf("malformed parent hash: %w", err) + } + + nb, err := common.HexToBytes(rpcHeader.Number) + if err != nil { + return nil, fmt.Errorf("malformed number hex string: %w", err) + } + + number := common.BytesToUint(nb) + + stateRoot, err := common.HexToHash(rpcHeader.StateRoot) + if err != nil { + return nil, fmt.Errorf("malformed state root: %w", err) + } + + extrinsicsRoot, err := common.HexToHash(rpcHeader.ExtrinsicsRoot) + if err != nil { + return nil, fmt.Errorf("malformed extrinsic root: %w", err) + } + + digest, err := rpcLogsToDigest(rpcHeader.Digest.Logs) + if err != nil { + return nil, fmt.Errorf("malformed digest logs: %w", err) + } + + header, err = types.NewHeader(parentHash, stateRoot, extrinsicsRoot, number, digest) + if err != nil { + return nil, fmt.Errorf("cannot create new header: %w", err) + } + + return header, nil +} diff --git a/tests/utils/rpc/request.go b/tests/utils/rpc/request.go new file mode 100644 index 0000000000..1e0a5c3cc5 --- /dev/null +++ b/tests/utils/rpc/request.go @@ -0,0 +1,123 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/pkg/scale" +) + +// Post sends a payload using the method, host and params string given. +// It returns the response bytes and an eventual error. +func Post(ctx context.Context, endpoint, method, params string) (data []byte, err error) { + requestBody := fmt.Sprintf(`{"jsonrpc":"2.0","method":"%s","params":%s,"id":1}`, method, params) + requestBuffer := bytes.NewBuffer([]byte(requestBody)) + + request, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, requestBuffer) + if err != nil { + return nil, fmt.Errorf("cannot create HTTP request: %w", err) + } + + const contentType = "application/json" + request.Header.Set("Content-Type", contentType) + request.Header.Set("Accept", contentType) + + response, err := http.DefaultClient.Do(request) + if err != nil { + return nil, fmt.Errorf("cannot do HTTP request: %w", err) + } + + data, err = io.ReadAll(response.Body) + if err != nil { + _ = response.Body.Close() + return nil, fmt.Errorf("cannot read HTTP response body: %w", err) + } + + err = response.Body.Close() + if err != nil { + return nil, fmt.Errorf("cannot close HTTP response body: %w", err) + } + + return data, nil +} + +var ( + ErrResponseVersion = errors.New("unexpected response version received") + ErrResponseError = errors.New("response error received") +) + +// Decode decodes []body into the target interface. 
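+//
+// For instance (illustrative values), a Post call with method
+// "chain_getBlockHash" and params "[0]" sends:
+//
+//	{"jsonrpc":"2.0","method":"chain_getBlockHash","params":[0],"id":1}
+//
+// and Decode then decodes the "result" field (falling back to the
+// "params" field for websocket notifications) into the target.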
+func Decode(body []byte, target interface{}) error { + decoder := json.NewDecoder(bytes.NewReader(body)) + decoder.DisallowUnknownFields() + + var response ServerResponse + err := decoder.Decode(&response) + if err != nil { + return fmt.Errorf("cannot decode response: %s: %w", + string(body), err) + } + + if response.Version != "2.0" { + return fmt.Errorf("%w: %s", ErrResponseVersion, response.Version) + } + + if response.Error != nil { + return fmt.Errorf("%w: %s (error code %d)", + ErrResponseError, response.Error.Message, response.Error.ErrorCode) + } + + jsonRawMessage := response.Result + if jsonRawMessage == nil { + jsonRawMessage = response.Params + } + decoder = json.NewDecoder(bytes.NewReader(jsonRawMessage)) + decoder.DisallowUnknownFields() + + err = decoder.Decode(target) + if err != nil { + return fmt.Errorf("cannot decode response result: %s: %w", + string(response.Result), err) + } + + return nil +} + +// NewEndpoint returns http://localhost: +func NewEndpoint(port string) string { + return "http://localhost:" + port +} + +func rpcLogsToDigest(logs []string) (digest scale.VaryingDataTypeSlice, err error) { + digest = types.NewDigest() + + for _, l := range logs { + itemBytes, err := common.HexToBytes(l) + if err != nil { + return digest, fmt.Errorf("malformed digest item hex string: %w", err) + } + + di := types.NewDigestItem() + err = scale.Unmarshal(itemBytes, &di) + if err != nil { + return digest, fmt.Errorf("malformed digest item bytes: %w", err) + } + + err = digest.Add(di.Value()) + if err != nil { + return digest, fmt.Errorf("cannot add digest item to digest: %w", err) + } + } + + return digest, nil +} diff --git a/tests/utils/rpc/system.go b/tests/utils/rpc/system.go new file mode 100644 index 0000000000..332190409b --- /dev/null +++ b/tests/utils/rpc/system.go @@ -0,0 +1,49 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import ( + "context" + "fmt" + + "github.com/ChainSafe/gossamer/dot/rpc/modules" + "github.com/ChainSafe/gossamer/lib/common" +) + +// GetPeers calls the endpoint system_peers +func GetPeers(ctx context.Context, rpcPort string) (peers []common.PeerInfo, err error) { + endpoint := NewEndpoint(rpcPort) + const method = "system_peers" + const params = "[]" + respBody, err := Post(ctx, endpoint, method, params) + if err != nil { + return nil, fmt.Errorf("cannot post RPC: %w", err) + } + + var peersResponse modules.SystemPeersResponse + err = Decode(respBody, &peersResponse) + if err != nil { + return nil, fmt.Errorf("cannot decode RPC: %w", err) + } + + return peersResponse, nil +} + +// GetHealth sends an RPC request to `system_health`. 
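+//
+// A hedged usage sketch in a test:
+//
+//	health, err := rpc.GetHealth(ctx, "http://localhost:"+node.RPCPort())
+//	require.NoError(t, err)
+//	require.True(t, health.ShouldHavePeers)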
+func GetHealth(ctx context.Context, address string) ( + health modules.SystemHealthResponse, err error) { + const method = "system_health" + const params = "{}" + respBody, err := Post(ctx, address, method, params) + if err != nil { + return health, fmt.Errorf("cannot post RPC: %w", err) + } + + err = Decode(respBody, &health) + if err != nil { + return health, fmt.Errorf("cannot decode RPC: %w", err) + } + + return health, nil +} diff --git a/tests/utils/rpc/types.go b/tests/utils/rpc/types.go new file mode 100644 index 0000000000..b26590a37b --- /dev/null +++ b/tests/utils/rpc/types.go @@ -0,0 +1,30 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package rpc + +import "encoding/json" + +// ServerResponse wraps the RPC response +type ServerResponse struct { + // JSON-RPC Version + Version string `json:"jsonrpc"` + // Method name called + Method string `json:"method"` + // Resulting values + Result json.RawMessage `json:"result"` + // Params values including results + Params json.RawMessage `json:"params"` + // Any generated errors + Error *Error `json:"error"` + Subscription *json.RawMessage `json:"subscription"` + // Request id + ID *json.RawMessage `json:"id"` +} + +// Error is a struct that holds the error message and the error code for a error +type Error struct { + Message string `json:"message"` + ErrorCode int `json:"code"` + Data map[string]interface{} `json:"data"` +} diff --git a/tests/utils/rpc_methods.go b/tests/utils/rpc_methods.go deleted file mode 100644 index 6b5d44060d..0000000000 --- a/tests/utils/rpc_methods.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -//nolint:revive -var ( - // CHAIN METHODS - ChainGetBlock = "chain_getBlock" - ChainGetHeader = "chain_getHeader" - ChainGetFinalizedHead = "chain_getFinalizedHead" - ChainGetFinalizedHeadByRound = "chain_getFinalizedHeadByRound" - ChainGetBlockHash = "chain_getBlockHash" - - // AUTHOR METHODS - AuthorSubmitExtrinsic = "author_submitExtrinsic" - AuthorPendingExtrinsics = "author_pendingExtrinsics" - - // STATE METHODS - StateGetStorage = "state_getStorage" - - // DEV METHODS - DevControl = "dev_control" - - // GRANDPA - GrandpaProveFinality = "grandpa_proveFinality" -) diff --git a/tests/utils/system.go b/tests/utils/system.go deleted file mode 100644 index e193a8436c..0000000000 --- a/tests/utils/system.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package utils - -import ( - "context" - "testing" - - "github.com/ChainSafe/gossamer/dot/rpc/modules" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/require" -) - -// GetPeers calls the endpoint system_peers -func GetPeers(ctx context.Context, t *testing.T, rpcPort string) []common.PeerInfo { - endpoint := NewEndpoint(rpcPort) - const method = "system_peers" - const params = "[]" - respBody, err := PostRPC(ctx, endpoint, method, params) - require.NoError(t, err) - - resp := new(modules.SystemPeersResponse) - err = DecodeRPC(t, respBody, resp) - require.NoError(t, err) - require.NotNil(t, resp) - - return *resp -} diff --git a/tests/utils/writer.go b/tests/utils/writer.go new file mode 100644 index 0000000000..47c738b352 --- /dev/null +++ b/tests/utils/writer.go @@ -0,0 +1,30 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package utils + +import ( + "io" + "testing" +) + +// TestWriter is a 
writer implementing `io.Writer` +// using the Go test logger `t.Log()`. +type TestWriter struct { + t *testing.T +} + +func (tw *TestWriter) Write(p []byte) (n int, err error) { + tw.t.Helper() + line := string(p) + tw.t.Log(line) + return len(p), nil +} + +// NewTestWriter creates a new writer which uses +// the Go test logger to write out. +func NewTestWriter(t *testing.T) (writer io.Writer) { + return &TestWriter{ + t: t, + } +} From 16313f8767a2f30f2f7a102b4749c17f9e05dc55 Mon Sep 17 00:00:00 2001 From: Edward Mack Date: Mon, 13 Jun 2022 12:31:15 -0400 Subject: [PATCH 06/48] testing(dot/sync): create dot sync unit tests (#2261) --- .golangci.yml | 1 + dot/core/service_integration_test.go | 1 + dot/rpc/modules/author_integration_test.go | 3 + dot/sync/benchmark_test.go | 2 + dot/sync/bootstrap_syncer_test.go | 113 ++ dot/sync/chain_processor.go | 18 +- dot/sync/chain_processor_test.go | 950 ++++++++++ dot/sync/chain_sync.go | 169 +- dot/sync/chain_sync_integeration_test.go | 374 +--- dot/sync/chain_sync_test.go | 1561 +++++++++++++++++ dot/sync/disjoint_block_set.go | 2 + .../disjoint_block_set_integeration_test.go | 53 +- dot/sync/disjoint_block_set_test.go | 484 +++++ dot/sync/interface.go | 11 +- dot/sync/message_integeration_test.go | 2 +- dot/sync/message_test.go | 430 +++++ dot/sync/mock_chain_processor_test.go | 58 + dot/sync/mock_chain_sync_test.go | 100 +- dot/sync/mock_disjoint_block_set_test.go | 212 +++ dot/sync/mock_instance_test.go | 404 +++++ dot/sync/mock_interface_test.go | 773 ++++++++ dot/sync/outliers.go | 6 +- dot/sync/outliers_integeration_test.go | 27 - dot/sync/outliers_test.go | 46 + dot/sync/syncer.go | 2 +- dot/sync/syncer_integeration_test.go | 23 +- dot/sync/syncer_test.go | 409 +++++ dot/sync/test_helpers.go | 1 - dot/sync/tip_syncer_integeration_test.go | 49 +- dot/sync/tip_syncer_test.go | 401 +++++ dot/telemetry/mailer_test.go | 1 + 31 files changed, 6197 insertions(+), 489 deletions(-) create mode 100644 dot/sync/bootstrap_syncer_test.go create mode 100644 dot/sync/chain_processor_test.go create mode 100644 dot/sync/chain_sync_test.go create mode 100644 dot/sync/disjoint_block_set_test.go create mode 100644 dot/sync/message_test.go create mode 100644 dot/sync/mock_chain_processor_test.go create mode 100644 dot/sync/mock_disjoint_block_set_test.go create mode 100644 dot/sync/mock_instance_test.go create mode 100644 dot/sync/mock_interface_test.go delete mode 100644 dot/sync/outliers_integeration_test.go create mode 100644 dot/sync/outliers_test.go create mode 100644 dot/sync/syncer_test.go create mode 100644 dot/sync/tip_syncer_test.go diff --git a/.golangci.yml b/.golangci.yml index 1e89644f0c..e4beaa4478 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -59,6 +59,7 @@ linters: - nolintlint - revive - staticcheck + - tparallel - unconvert - unparam - varcheck diff --git a/dot/core/service_integration_test.go b/dot/core/service_integration_test.go index f60dc989cf..cbe534da09 100644 --- a/dot/core/service_integration_test.go +++ b/dot/core/service_integration_test.go @@ -195,6 +195,7 @@ func TestAnnounceBlock(t *testing.T) { } func TestService_InsertKey(t *testing.T) { + t.Parallel() ks := keystore.NewGlobalKeystore() cfg := &Config{ diff --git a/dot/rpc/modules/author_integration_test.go b/dot/rpc/modules/author_integration_test.go index 385243f763..d337d38d9a 100644 --- a/dot/rpc/modules/author_integration_test.go +++ b/dot/rpc/modules/author_integration_test.go @@ -255,6 +255,7 @@ func TestAuthorModule_SubmitExtrinsic_AlreadyInPool(t *testing.T) { } 
func TestAuthorModule_InsertKey_Integration(t *testing.T) { + t.Parallel() integrationTestController := setupStateAndRuntime(t, t.TempDir(), useInstanceFromGenesis) auth := newAuthorModule(t, integrationTestController) @@ -336,6 +337,7 @@ func TestAuthorModule_InsertKey_Integration(t *testing.T) { } func TestAuthorModule_HasKey_Integration(t *testing.T) { + t.Parallel() integrationTestController := setupStateAndRuntime(t, t.TempDir(), useInstanceFromGenesis) ks := keystore.NewGlobalKeystore() @@ -403,6 +405,7 @@ func TestAuthorModule_HasKey_Integration(t *testing.T) { } func TestAuthorModule_HasSessionKeys_Integration(t *testing.T) { + t.Parallel() integrationTestController := setupStateAndRuntime(t, t.TempDir(), useInstanceFromGenesis) auth := newAuthorModule(t, integrationTestController) diff --git a/dot/sync/benchmark_test.go b/dot/sync/benchmark_test.go index 4be86456bf..fd8e4f93d0 100644 --- a/dot/sync/benchmark_test.go +++ b/dot/sync/benchmark_test.go @@ -15,6 +15,7 @@ func Test_newSyncBenchmarker(t *testing.T) { t.Parallel() t.Run("10 samples to keep", func(t *testing.T) { + t.Parallel() const samplesToKeep = 10 actual := newSyncBenchmarker(samplesToKeep) @@ -27,6 +28,7 @@ func Test_newSyncBenchmarker(t *testing.T) { }) t.Run("panics on 0 sample to keep", func(t *testing.T) { + t.Parallel() const samplesToKeep = 0 assert.PanicsWithValue(t, "cannot have 0 samples to keep", func() { newSyncBenchmarker(samplesToKeep) diff --git a/dot/sync/bootstrap_syncer_test.go b/dot/sync/bootstrap_syncer_test.go new file mode 100644 index 0000000000..8096d29f9e --- /dev/null +++ b/dot/sync/bootstrap_syncer_test.go @@ -0,0 +1,113 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "errors" + "testing" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func Test_bootstrapSyncer_handleWorkerResult(t *testing.T) { + t.Parallel() + mockError := errors.New("mock testing error") + + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + worker *worker + wantWorkerToRetry *worker + err error + }{ + "nil worker.err returns nil": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return NewMockBlockState(ctrl) + }, + worker: &worker{}, + }, + "best block header error": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(nil, + mockError) + return mockBlockState + }, + worker: &worker{ + err: &workerError{}, + targetNumber: uintPtr(0), + }, + err: mockError, + }, + "targetNumber < bestBlockHeader number returns nil": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) + return mockBlockState + }, + worker: &worker{ + err: &workerError{}, + targetNumber: uintPtr(0), + }, + }, + "targetNumber > bestBlockHeader number worker errUnknownParent, error GetHighestFinalisedHeader": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(nil, mockError) + return mockBlockState + }, + worker: &worker{ + err: &workerError{err: errUnknownParent}, + targetNumber: uintPtr(3), + }, + err: 
mockError, + }, + "targetNumber > bestBlockHeader number worker errUnknownParent returns worker": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{Number: 1}, nil) + return mockBlockState + }, + worker: &worker{ + err: &workerError{err: errUnknownParent}, + targetNumber: uintPtr(3), + }, + wantWorkerToRetry: &worker{ + startNumber: uintPtr(1), + targetNumber: uintPtr(3), + }, + }, + "targetNumber > bestBlockHeader number returns worker": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) + return mockBlockState + }, + worker: &worker{ + err: &workerError{}, + targetNumber: uintPtr(3), + }, + wantWorkerToRetry: &worker{ + startNumber: uintPtr(3), + targetNumber: uintPtr(3), + }, + }, + } + for testName, tt := range tests { + tt := tt + t.Run(testName, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &bootstrapSyncer{ + blockState: tt.blockStateBuilder(ctrl), + } + gotWorkerToRetry, err := s.handleWorkerResult(tt.worker) + assert.ErrorIs(t, err, tt.err) + assert.Equal(t, tt.wantWorkerToRetry, gotWorkerToRetry) + }) + } +} diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go index c3970131ef..de02d5bd33 100644 --- a/dot/sync/chain_processor.go +++ b/dot/sync/chain_processor.go @@ -14,6 +14,8 @@ import ( "github.com/ChainSafe/gossamer/lib/blocktree" ) +//go:generate mockgen -destination=mock_chain_processor_test.go -package=$GOPACKAGE . ChainProcessor + // ChainProcessor processes ready blocks. 
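 // (ready blocks are blocks whose parent is already known; *chainProcessor
 // pops them off the readyBlocks queue and imports them via processBlockData)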
 // It is implemented by *chainProcessor
 type ChainProcessor interface {
@@ -160,7 +162,7 @@ func (s *chainProcessor) processBlockData(bd *types.BlockData) error {
 	logger.Debugf("processing block data with hash %s", bd.Hash)
 
 	if bd.Header != nil && bd.Body != nil {
-		if err := s.handleHeader(bd.Header); err != nil {
+		if err := s.babeVerifier.VerifyBlock(bd.Header); err != nil {
 			return err
 		}
 
@@ -191,16 +193,6 @@ func (s *chainProcessor) processBlockData(bd *types.BlockData) error {
 	return nil
 }
 
-// handleHeader handles headers included in BlockResponses
-func (s *chainProcessor) handleHeader(header *types.Header) error {
-	err := s.babeVerifier.VerifyBlock(header)
-	if err != nil {
-		return fmt.Errorf("%w: %s", ErrInvalidBlock, err.Error())
-	}
-
-	return nil
-}
-
 // handleBody handles block bodies included in BlockResponses
 func (s *chainProcessor) handleBody(body *types.Body) {
 	for _, ext := range *body {
@@ -210,10 +202,6 @@ func (s *chainProcessor) handleBody(body *types.Body) {
 
 // handleBlock handles blocks (header+body) included in BlockResponses
 func (s *chainProcessor) handleBlock(block *types.Block) error {
-	if block == nil || block.Body == nil {
-		return errors.New("block or body is nil")
-	}
-
 	parent, err := s.blockState.GetHeader(block.Header.ParentHash)
 	if err != nil {
 		return fmt.Errorf("%w: %s", errFailedToGetParent, err)
diff --git a/dot/sync/chain_processor_test.go b/dot/sync/chain_processor_test.go
new file mode 100644
index 0000000000..0f9cb6624d
--- /dev/null
+++ b/dot/sync/chain_processor_test.go
@@ -0,0 +1,950 @@
+// Copyright 2021 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package sync
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/lib/blocktree"
+	"github.com/ChainSafe/gossamer/lib/common"
+	"github.com/ChainSafe/gossamer/lib/runtime/storage"
+	"github.com/ChainSafe/gossamer/lib/trie"
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+//go:generate mockgen -destination=mock_instance_test.go -package=$GOPACKAGE github.com/ChainSafe/gossamer/lib/runtime Instance
+
+func Test_chainProcessor_handleBlock(t *testing.T) {
+	t.Parallel()
+	mockError := errors.New("test mock error")
+	testHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314")
+	testParentHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a")
+
+	tests := map[string]struct {
+		chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor
+		block                 *types.Block
+		wantErr               error
+	}{
+		"handle getHeader error": {
+			chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) {
+				mockBlockState := NewMockBlockState(ctrl)
+				mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError)
+				chainProcessor.blockState = mockBlockState
+				return
+			},
+			block: &types.Block{
+				Body: types.Body{},
+			},
+			wantErr: errFailedToGetParent,
+		},
+		"handle trieState error": {
+			chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) {
+				mockBlockState := NewMockBlockState(ctrl)
+				mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil)
+				chainProcessor.blockState = mockBlockState
+				mockStorageState := NewMockStorageState(ctrl)
+				mockStorageState.EXPECT().Lock()
+				mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, mockError)
+				mockStorageState.EXPECT().Unlock()
+
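+				// Lock, TrieState and Unlock are all expected here: handleBlock
+				// releases the storage lock even when TrieState fails.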
chainProcessor.storageState = mockStorageState + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "handle getRuntime error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + StateRoot: testHash, + }, nil) + mockBlockState.EXPECT().GetRuntime(&testParentHash).Return(nil, mockError) + chainProcessor.blockState = mockBlockState + trieState := newTrieState(t) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) + mockStorageState.EXPECT().Unlock() + chainProcessor.storageState = mockStorageState + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "handle runtime ExecuteBlock error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + trieState := newTrieState(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + StateRoot: testHash, + }, nil) + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(trieState) + mockInstance.EXPECT().ExecuteBlock(&types.Block{Body: types.Body{}}).Return(nil, mockError) + mockBlockState.EXPECT().GetRuntime(&testParentHash).Return(mockInstance, nil) + chainProcessor.blockState = mockBlockState + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) + mockStorageState.EXPECT().Unlock() + chainProcessor.storageState = mockStorageState + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "handle block import error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + trieState := newTrieState(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + StateRoot: testHash, + }, nil) + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(trieState) + mockInstance.EXPECT().ExecuteBlock(&types.Block{Body: types.Body{}}).Return(nil, nil) + mockBlockState.EXPECT().GetRuntime(&testParentHash).Return(mockInstance, nil) + chainProcessor.blockState = mockBlockState + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) + mockStorageState.EXPECT().Unlock() + chainProcessor.storageState = mockStorageState + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(&types.Block{Body: types.Body{}}, + trieState).Return(mockError) + chainProcessor.blockImportHandler = mockBlockImportHandler + return + }, + block: &types.Block{ + Body: types.Body{}, + }, + wantErr: mockError, + }, + "base case": { + chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { + mockBlock := &types.Block{ + Body: types.Body{}, // empty slice of extrinsics + } + trieState := newTrieState(t) + mockBlockState := NewMockBlockState(ctrl) + mockHeader := &types.Header{ + Number: 0, + StateRoot: trie.EmptyHash, + } + mockHeaderHash := mockHeader.Hash() + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) + mockInstance := NewMockInstance(ctrl) + 
mockInstance.EXPECT().SetContextStorage(trieState) + mockInstance.EXPECT().ExecuteBlock(mockBlock) + mockBlockState.EXPECT().GetRuntime(&mockHeaderHash).Return(mockInstance, nil) + chainProcessor.blockState = mockBlockState + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().Unlock() + mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) + chainProcessor.storageState = mockStorageState + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState).Return(nil) + chainProcessor.blockImportHandler = mockBlockImportHandler + mockTelemetry := NewMockClient(ctrl) + mockTelemetry.EXPECT().SendMessage(gomock.Any()) + chainProcessor.telemetry = mockTelemetry + return + }, + block: &types.Block{ + Header: types.Header{ + Number: 0, + }, + Body: types.Body{}, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := tt.chainProcessorBuilder(ctrl) + + err := s.handleBlock(tt.block) + assert.ErrorIs(t, err, tt.wantErr) + }) + } + t.Run("panics on different parent state root", func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + bock := &types.Block{ + Header: types.Header{ + ParentHash: common.Hash{1}, + }, + } + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().GetHeader(common.Hash{1}). + Return(&types.Header{StateRoot: common.Hash{2}}, nil) + trieState := newTrieState(t) + storageState := NewMockStorageState(ctrl) + lockCall := storageState.EXPECT().Lock() + trieStateCall := storageState.EXPECT().TrieState(&common.Hash{2}). + Return(trieState, nil).After(lockCall) + storageState.EXPECT().Unlock().After(trieStateCall) + chainProcessor := &chainProcessor{ + blockState: blockState, + storageState: storageState, + } + const expectedPanicValue = "parent state root does not match snapshot state root" + assert.PanicsWithValue(t, expectedPanicValue, func() { + _ = chainProcessor.handleBlock(bock) + }) + }) +} + +func newTrieState(t *testing.T) *storage.TrieState { + t.Helper() + trieState, err := storage.NewTrieState(nil) + require.NoError(t, err) + return trieState +} + +func Test_chainProcessor_handleBody(t *testing.T) { + t.Parallel() + + testExtrinsics := []types.Extrinsic{{1, 2, 3}, {7, 8, 9, 0}, {0xa, 0xb}} + testBody := types.NewBody(testExtrinsics) + + t.Run("base case", func(t *testing.T) { + ctrl := gomock.NewController(t) + mockTransactionState := NewMockTransactionState(ctrl) + mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[0]) + mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[1]) + mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[2]) + processor := chainProcessor{ + transactionState: mockTransactionState, + } + processor.handleBody(testBody) + }) +} + +func Test_chainProcessor_handleJustification(t *testing.T) { + t.Parallel() + + expectedHash := common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a") + + type args struct { + header *types.Header + justification []byte + } + tests := map[string]struct { + chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor + args args + }{ + "nil justification and header": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + return chainProcessor{} + }, + }, + "invalid justification": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockFinalityGadget := 
NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`x`)).Return(errors.New("error")) + return chainProcessor{ + finalityGadget: mockFinalityGadget, + } + }, + args: args{ + header: &types.Header{ + Number: 0, + }, + justification: []byte(`x`), + }, + }, + "set justification error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().SetJustification(expectedHash, []byte(`xx`)).Return(errors.New("fake error")) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`xx`)).Return(nil) + return chainProcessor{ + blockState: mockBlockState, + finalityGadget: mockFinalityGadget, + } + }, + args: args{ + header: &types.Header{ + Number: 0, + }, + justification: []byte(`xx`), + }, + }, + "base case set": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().SetJustification(expectedHash, []byte(`1234`)).Return(nil) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`1234`)).Return(nil) + return chainProcessor{ + blockState: mockBlockState, + finalityGadget: mockFinalityGadget, + } + }, + args: args{ + header: &types.Header{ + Number: 0, + }, + justification: []byte(`1234`), + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := tt.chainProcessorBuilder(ctrl) + s.handleJustification(tt.args.header, tt.args.justification) + }) + } +} + +func Test_chainProcessor_processBlockData(t *testing.T) { + t.Parallel() + + mockError := errors.New("mock test error") + justification := []byte{0, 1, 2} + + tests := map[string]struct { + chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor + blockData *types.BlockData + expectedError error + }{ + "nil block data": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + return chainProcessor{} + }, + blockData: nil, + expectedError: ErrNilBlockData, + }, + "handle has header error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, mockError) + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: &types.BlockData{}, + expectedError: mockError, + }, + "handle has block body error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, mockError) + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: &types.BlockData{}, + expectedError: mockError, + }, + "handle getBlockByHash error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(nil, mockError) + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: &types.BlockData{}, + expectedError: mockError, + }, + "handle AddBlockToBlockTree error": { + 
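+			// blocktree.ErrBlockExists from AddBlockToBlockTree is tolerated,
+			// so this case expects processBlockData to return a nil error.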
chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlock := &types.Block{ + Header: types.Header{ + Number: uint(1), + }, + } + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) + mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ + Header: types.Header{Number: 1}}).Return(blocktree.ErrBlockExists) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + return chainProcessor{ + blockState: mockBlockState, + finalityGadget: mockFinalityGadget, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + } + }, + blockData: &types.BlockData{ + Justification: &[]byte{1, 2, 3}, + }, + }, + "handle block data justification != nil": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlock := &types.Block{ + Header: types.Header{ + Number: uint(1), + }, + } + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) + mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ + Header: types.Header{Number: 1}}).Return(nil) + mockBlockState.EXPECT().SetJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, 3}) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, + 3}) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil) + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, + nil).Return(nil) + return chainProcessor{ + blockState: mockBlockState, + finalityGadget: mockFinalityGadget, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + } + }, + blockData: &types.BlockData{ + Justification: &[]byte{1, 2, 3}, + }, + }, + "handle trie state error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlock := &types.Block{ + Header: types.Header{ + Number: uint(1), + }, + } + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) + mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ + Header: types.Header{Number: 1}}).Return(nil) + mockBlockState.EXPECT().SetJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, 3}) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, + 3}) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, mockError) + return chainProcessor{ + blockState: mockBlockState, + 
finalityGadget: mockFinalityGadget, + storageState: mockStorageState, + } + }, + blockData: &types.BlockData{ + Justification: &[]byte{1, 2, 3}, + }, + expectedError: mockError, + }, + "handle block import handler error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlock := &types.Block{ + Header: types.Header{ + Number: uint(1), + }, + } + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) + mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ + Header: types.Header{Number: 1}}).Return(nil) + mockBlockState.EXPECT().SetJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, 3}) + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, + 3}) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil) + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, + nil).Return(mockError) + return chainProcessor{ + blockState: mockBlockState, + finalityGadget: mockFinalityGadget, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + } + }, + blockData: &types.BlockData{ + Justification: &[]byte{1, 2, 3}, + }, + }, + "has header body false": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).Return(nil) + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: &types.BlockData{}, + }, + "handle babe verify block error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(mockError) + return chainProcessor{ + blockState: mockBlockState, + babeVerifier: mockBabeVerifier, + } + }, + blockData: &types.BlockData{ + Header: &types.Header{}, + Body: &types.Body{}, + }, + expectedError: mockError, + }, + "handle error with handleBlock": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) + return chainProcessor{ + blockState: mockBlockState, + babeVerifier: mockBabeVerifier, + } + }, + blockData: &types.BlockData{ + Header: &types.Header{}, + Body: &types.Body{}, + }, + expectedError: errFailedToGetParent, + }, + "error adding block": { + chainProcessorBuilder: func(ctrl *gomock.Controller) 
chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{1, 2, 3}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{1, 2, 3}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{1, 2, 3}).Return(&types.Block{ + Header: types.Header{ + Number: uint(1), + }, + }, nil) + mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ + Header: types.Header{Number: 1}}).Return(mockError) + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: &types.BlockData{ + Hash: common.Hash{1, 2, 3}, + }, + expectedError: mockError, + }, + "handle block import": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockTrieState := newTrieState(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{1, 2, 3}).Return(true, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{1, 2, 3}).Return(true, nil) + mockBlockState.EXPECT().GetBlockByHash(common.Hash{1, 2, 3}).Return(&types.Block{ + Header: types.Header{ + Number: uint(1), + }, + }, nil) + mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{Header: types.Header{Number: 1}}).Return(nil) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(mockTrieState, nil) + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(&types.Block{Header: types.Header{Number: 1}}, mockTrieState) + return chainProcessor{ + blockState: mockBlockState, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + } + }, + blockData: &types.BlockData{ + Hash: common.Hash{1, 2, 3}, + }, + }, + "handle compareAndSetBlockData error": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).Return(mockError) + return chainProcessor{ + blockState: mockBlockState, + } + }, + blockData: &types.BlockData{}, + expectedError: mockError, + }, + "handle header": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + stateRootHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") + mockTrieState := newTrieState(t) + runtimeHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(mockTrieState) + mockInstance.EXPECT().ExecuteBlock(&types.Block{Header: types.Header{}, Body: types.Body{}}) + + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + Number: 0, + StateRoot: stateRootHash, + }, nil) + mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{Header: &types.Header{}, Body: &types.Body{}}) + mockBlockState.EXPECT().GetRuntime(&runtimeHash).Return(mockInstance, nil) + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + 
mockStorageState.EXPECT().TrieState(&stateRootHash).Return(mockTrieState, nil) + mockStorageState.EXPECT().Unlock() + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport(&types.Block{ + Header: types.Header{}, Body: types.Body{}}, mockTrieState) + mockTelemetry := NewMockClient(ctrl) + mockTelemetry.EXPECT().SendMessage(gomock.Any()).AnyTimes() + return chainProcessor{ + blockState: mockBlockState, + babeVerifier: mockBabeVerifier, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + telemetry: mockTelemetry, + } + }, + blockData: &types.BlockData{ + Header: &types.Header{ + Number: 0, + }, + Body: &types.Body{}, + }, + }, + "handle justification": { + chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { + stateRootHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") + runtimeHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") + mockTrieState, _ := storage.NewTrieState(nil) + mockInstance := NewMockInstance(ctrl) + mockInstance.EXPECT().SetContextStorage(mockTrieState) + mockInstance.EXPECT().ExecuteBlock(&types.Block{Header: types.Header{}, Body: types.Body{}}) + + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + Number: 0, + StateRoot: stateRootHash, + }, nil) + mockBlockState.EXPECT().SetJustification( + common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), justification) + mockBlockState.EXPECT().CompareAndSetBlockData(gomock.AssignableToTypeOf(&types.BlockData{})) + mockBlockState.EXPECT().GetRuntime(&runtimeHash).Return(mockInstance, nil) + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}) + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().TrieState(&stateRootHash).Return(mockTrieState, nil) + mockStorageState.EXPECT().Unlock() + mockBlockImportHandler := NewMockBlockImportHandler(ctrl) + mockBlockImportHandler.EXPECT().HandleBlockImport( + &types.Block{Header: types.Header{}, Body: types.Body{}}, mockTrieState) + mockTelemetry := NewMockClient(ctrl) + mockTelemetry.EXPECT().SendMessage(gomock.Any()).AnyTimes() + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification( + common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), justification) + return chainProcessor{ + blockState: mockBlockState, + babeVerifier: mockBabeVerifier, + storageState: mockStorageState, + blockImportHandler: mockBlockImportHandler, + telemetry: mockTelemetry, + finalityGadget: mockFinalityGadget, + } + }, + blockData: &types.BlockData{ + Header: &types.Header{ + Number: 0, + }, + Body: &types.Body{}, + Justification: &justification, + }, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + processor := tt.chainProcessorBuilder(ctrl) + err := processor.processBlockData(tt.blockData) + assert.ErrorIs(t, err, tt.expectedError) + }) + } +} + +func Test_chainProcessor_processReadyBlocks(t *testing.T) { + t.Parallel() + mockError := errors.New("test mock error") + tests := map[string]struct { + 
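+		// Each builder wires its mocks per subtest; the done channel is closed
+		// by the final expectation, signalling that processReadyBlocks has
+		// consumed the pushed block data before the test cancels the context.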
blockStateBuilder func(ctrl *gomock.Controller, done chan struct{}) BlockState + blockData *types.BlockData + babeVerifierBuilder func(ctrl *gomock.Controller) BabeVerifier + pendingBlockBuilder func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet + storageStateBuilder func(ctrl *gomock.Controller, done chan struct{}) StorageState + }{ + "base case": { + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).DoAndReturn(func(*types. + BlockData) error { + close(done) + return nil + }) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + return nil + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + return nil + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + return nil + }, + }, + "add block": { + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + Header: &types.Header{}, + Body: &types.Body{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) + return mockBabeVerifier + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ + Header: types.Header{}, + Body: types.Body{}, + }).DoAndReturn(func(block *types.Block) error { + close(done) + return nil + }) + return mockDisjointBlockSet + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + return nil + }, + }, + "error in process block": { + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + Header: &types.Header{}, + Body: &types.Body{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) + return mockBabeVerifier + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + return nil + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + mockStorageState := NewMockStorageState(ctrl) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().Unlock() + mockStorageState.EXPECT().TrieState(&common.Hash{}).DoAndReturn(func(hash *common.Hash) (*storage. 
+ TrieState, error) { + close(done) + return nil, mockError + }) + return mockStorageState + }, + }, + "add block error": { + blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) + return mockBlockState + }, + blockData: &types.BlockData{ + Hash: common.Hash{}, + Header: &types.Header{}, + Body: &types.Body{}, + }, + babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) + return mockBabeVerifier + }, + pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ + Header: types.Header{}, + Body: types.Body{}, + }).DoAndReturn(func(block *types.Block) error { + close(done) + return mockError + }) + return mockDisjointBlockSet + }, + storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { + return nil + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + ctx, cancel := context.WithCancel(context.Background()) + readyBlock := newBlockQueue(5) + done := make(chan struct{}) + + s := &chainProcessor{ + ctx: ctx, + cancel: cancel, + readyBlocks: readyBlock, + blockState: tt.blockStateBuilder(ctrl, done), + babeVerifier: tt.babeVerifierBuilder(ctrl), + pendingBlocks: tt.pendingBlockBuilder(ctrl, done), + storageState: tt.storageStateBuilder(ctrl, done), + } + + go s.processReadyBlocks() + + readyBlock.push(tt.blockData) + <-done + s.cancel() + }) + } +} + +func Test_newChainProcessor(t *testing.T) { + t.Parallel() + + mockReadyBlock := newBlockQueue(5) + mockDisjointBlockSet := NewMockDisjointBlockSet(nil) + mockBlockState := NewMockBlockState(nil) + mockStorageState := NewMockStorageState(nil) + mockTransactionState := NewMockTransactionState(nil) + mockBabeVerifier := NewMockBabeVerifier(nil) + mockFinalityGadget := NewMockFinalityGadget(nil) + mockBlockImportHandler := NewMockBlockImportHandler(nil) + + type args struct { + readyBlocks *blockQueue + pendingBlocks DisjointBlockSet + blockState BlockState + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + } + tests := []struct { + name string + args args + want *chainProcessor + }{ + { + name: "with args", + args: args{ + readyBlocks: mockReadyBlock, + pendingBlocks: mockDisjointBlockSet, + blockState: mockBlockState, + storageState: mockStorageState, + transactionState: mockTransactionState, + babeVerifier: mockBabeVerifier, + finalityGadget: mockFinalityGadget, + blockImportHandler: mockBlockImportHandler, + }, + want: &chainProcessor{ + readyBlocks: mockReadyBlock, + pendingBlocks: mockDisjointBlockSet, + blockState: mockBlockState, + storageState: mockStorageState, + transactionState: mockTransactionState, + babeVerifier: mockBabeVerifier, + finalityGadget: mockFinalityGadget, + blockImportHandler: mockBlockImportHandler, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got := 
newChainProcessor(tt.args.readyBlocks, tt.args.pendingBlocks, tt.args.blockState, + tt.args.storageState, tt.args.transactionState, tt.args.babeVerifier, tt.args.finalityGadget, + tt.args.blockImportHandler, nil) + assert.NotNil(t, got.ctx) + got.ctx = nil + assert.NotNil(t, got.cancel) + got.cancel = nil + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 7a6e00828d..a31b1f376f 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -88,7 +88,7 @@ type workHandler interface { handleTick() ([]*worker, error) } -//go:generate mockgen -destination=mock_chain_sync_test.go -package $GOPACKAGE . ChainSync +//go:generate mockgen -destination=mock_chain_sync_test.go -package $GOPACKAGE -source chain_sync.go . ChainSync,workHandler // ChainSync contains the methods used by the high-level service into the `chainSync` module type ChainSync interface { @@ -161,6 +161,8 @@ type chainSync struct { minPeers int maxWorkerRetries uint16 slotDuration time.Duration + + logSyncPeriod time.Duration } type chainSyncConfig struct { @@ -175,6 +177,7 @@ type chainSyncConfig struct { func newChainSync(cfg *chainSyncConfig) *chainSync { ctx, cancel := context.WithCancel(context.Background()) const syncSamplesToKeep = 30 + const logSyncPeriod = 5 * time.Second return &chainSync{ ctx: ctx, cancel: cancel, @@ -194,6 +197,7 @@ func newChainSync(cfg *chainSyncConfig) *chainSync { minPeers: cfg.minPeers, maxWorkerRetries: uint16(cfg.maxPeers), slotDuration: cfg.slotDuration, + logSyncPeriod: logSyncPeriod, } } @@ -267,7 +271,7 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error // check if they are on a fork or not head, err := cs.blockState.BestBlockHeader() if err != nil { - return err + return fmt.Errorf("best block header: %w", err) } if ps.number <= head.Number { @@ -275,7 +279,7 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error // as we already have that block ourHash, err := cs.blockState.GetHashByNumber(ps.number) if err != nil { - return err + return fmt.Errorf("get block hash by number: %w", err) } if ourHash.Equal(ps.hash) { @@ -287,7 +291,7 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error // for now, we can remove them from the syncing peers set fin, err := cs.blockState.GetHighestFinalisedHeader() if err != nil { - return err + return fmt.Errorf("get highest finalised header: %w", err) } // their block hash doesn't match ours for that number (ie. they are on a different @@ -300,14 +304,15 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error Value: peerset.BadBlockAnnouncementValue, Reason: peerset.BadBlockAnnouncementReason, }, p) - return errPeerOnInvalidFork + return fmt.Errorf("%w: for peer %s and block number %d", + errPeerOnInvalidFork, p, ps.number) } // peer is on a fork, check if we have processed the fork already or not // ie. is their block written to our db? 
has, err := cs.blockState.HasHeader(ps.hash) if err != nil { - return err + return fmt.Errorf("has header: %w", err) } // if so, do nothing, as we already have their fork @@ -319,7 +324,7 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error // the peer has a higher best block than us, or they are on some fork we are not aware of // add it to the disjoint block set if err = cs.pendingBlocks.addHashAndNumber(ps.hash, ps.number); err != nil { - return err + return fmt.Errorf("add hash and number: %w", err) } cs.workQueue <- ps @@ -328,7 +333,7 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error } func (cs *chainSync) logSyncSpeed() { - t := time.NewTicker(time.Second * 5) + t := time.NewTicker(cs.logSyncPeriod) defer t.Stop() for { @@ -411,77 +416,9 @@ func (cs *chainSync) sync() { logger.Errorf("failed to handle chain sync work: %s", err) } case res := <-cs.resultQueue: - // delete worker from workers map - cs.workerState.delete(res.id) - - // handle results from worker - // if there is an error, potentially retry the worker - if res.err == nil || res.ctx.Err() != nil { - continue + if err := cs.handleResult(res); err != nil { + logger.Errorf("failed to handle chain sync result: %s", err) } - - logger.Debugf("worker id %d failed: %s", res.id, res.err.err) - - // handle errors. in the case that a peer did not respond to us in time, - // temporarily add them to the ignore list. - switch { - case errors.Is(res.err.err, context.Canceled): - return - case errors.Is(res.err.err, errNoPeers): - logger.Debugf("worker id %d not able to sync with any peer", res.id) - continue - case errors.Is(res.err.err, context.DeadlineExceeded): - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.TimeOutValue, - Reason: peerset.TimeOutReason, - }, res.err.who) - cs.ignorePeer(res.err.who) - case strings.Contains(res.err.err.Error(), "dial backoff"): - cs.ignorePeer(res.err.who) - continue - case res.err.err.Error() == "protocol not supported": - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, res.err.who) - cs.ignorePeer(res.err.who) - continue - default: - } - - worker, err := cs.handler.handleWorkerResult(res) - if err != nil { - logger.Errorf("failed to handle worker result: %s", err) - continue - } else if worker == nil { - continue - } - - worker.retryCount = res.retryCount + 1 - if worker.retryCount > cs.maxWorkerRetries { - logger.Debugf( - "discarding worker id %d: maximum retry count reached", - worker.id) - - // if this worker was triggered due to a block in the pending blocks set, - // we want to remove it from the set, as we asked all our peers for it - // and none replied with the info we need. - if worker.pendingBlock != nil { - cs.pendingBlocks.removeBlock(worker.pendingBlock.hash) - } - continue - } - - // if we've already tried a peer and there was an error, - // then we shouldn't try them again. 
- if res.peersTried != nil { - worker.peersTried = res.peersTried - } else { - worker.peersTried = make(map[peer.ID]struct{}) - } - - worker.peersTried[res.err.who] = struct{}{} - cs.tryDispatchWorker(worker) case <-ticker.C: cs.maybeSwitchMode() @@ -525,6 +462,80 @@ func (cs *chainSync) maybeSwitchMode() { } } +func (cs *chainSync) handleResult(resultWorker *worker) error { + // delete worker from workers map + cs.workerState.delete(resultWorker.id) + + // handle results from worker + // if there is an error, potentially retry the worker + if resultWorker.err == nil || resultWorker.ctx.Err() != nil { + return nil + } + + logger.Debugf("worker id %d failed: %s", resultWorker.id, resultWorker.err.err) + + // handle errors. in the case that a peer did not respond to us in time, + // temporarily add them to the ignore list. + switch { + case errors.Is(resultWorker.err.err, context.Canceled): + return nil + case errors.Is(resultWorker.err.err, errNoPeers): + logger.Debugf("worker id %d not able to sync with any peer", resultWorker.id) + return nil + case errors.Is(resultWorker.err.err, context.DeadlineExceeded): + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.TimeOutValue, + Reason: peerset.TimeOutReason, + }, resultWorker.err.who) + cs.ignorePeer(resultWorker.err.who) + case strings.Contains(resultWorker.err.err.Error(), "dial backoff"): + cs.ignorePeer(resultWorker.err.who) + return nil + case resultWorker.err.err.Error() == "protocol not supported": + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadProtocolValue, + Reason: peerset.BadProtocolReason, + }, resultWorker.err.who) + cs.ignorePeer(resultWorker.err.who) + return nil + } + + worker, err := cs.handler.handleWorkerResult(resultWorker) + if err != nil { + logger.Errorf("failed to handle worker result: %s", err) + return err + } else if worker == nil { + return nil + } + + worker.retryCount = resultWorker.retryCount + 1 + if worker.retryCount > cs.maxWorkerRetries { + logger.Debugf( + "discarding worker id %d: maximum retry count %d reached", + worker.id, cs.maxWorkerRetries) + + // if this worker was triggered due to a block in the pending blocks set, + // we want to remove it from the set, as we asked all our peers for it + // and none replied with the info we need. + if worker.pendingBlock != nil { + cs.pendingBlocks.removeBlock(worker.pendingBlock.hash) + } + return nil + } + + // if we've already tried a peer and there was an error, + // then we shouldn't try them again. 
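+	// The retried worker inherits peersTried (plus the peer that just
+	// failed), so the next dispatch can avoid peers already tried.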
+ if resultWorker.peersTried != nil { + worker.peersTried = resultWorker.peersTried + } else { + worker.peersTried = make(map[peer.ID]struct{}) + } + + worker.peersTried[resultWorker.err.who] = struct{}{} + cs.tryDispatchWorker(worker) + return nil +} + // setMode stops all existing workers and clears the worker set and switches the `handler` // based on the new mode, if the mode is different than previous func (cs *chainSync) setMode(mode chainSyncState) { @@ -568,7 +579,7 @@ func (cs *chainSync) getTarget() uint { uintArr = append(uintArr, ps.number) } - sum, count := removeOutliers(uintArr) + sum, count := nonOutliersSumCount(uintArr) quotientBigInt := big.NewInt(0).Div(sum, big.NewInt(int64(count))) return uint(quotientBigInt.Uint64()) } diff --git a/dot/sync/chain_sync_integeration_test.go b/dot/sync/chain_sync_integeration_test.go index d5e12f2499..4c28b52220 100644 --- a/dot/sync/chain_sync_integeration_test.go +++ b/dot/sync/chain_sync_integeration_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration // Copyright 2021 ChainSafe Systems (ON) // SPDX-License-Identifier: LGPL-3.0-only @@ -14,151 +13,40 @@ import ( "time" "github.com/ChainSafe/gossamer/dot/network" - syncmocks "github.com/ChainSafe/gossamer/dot/sync/mocks" + "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/sync/mocks" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/variadic" "github.com/ChainSafe/gossamer/lib/trie" - + "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -const ( - defaultMinPeers = 1 - defaultMaxPeers = 5 - testTimeout = time.Second * 5 - defaultSlotDuration = time.Second * 6 -) - -func newTestChainSync(t *testing.T) (*chainSync, *blockQueue) { - header, err := types.NewHeader(common.NewHash([]byte{0}), - trie.EmptyHash, trie.EmptyHash, 0, types.NewDigest()) - require.NoError(t, err) - - bs := new(syncmocks.BlockState) - bs.On("BestBlockHeader").Return(header, nil) - bs.On("GetFinalisedNotifierChannel").Return(make(chan *types.FinalisationInfo, 128), nil) - bs.On("HasHeader", mock.AnythingOfType("common.Hash")).Return(true, nil) - - net := new(syncmocks.Network) - net.On("DoBlockRequest", mock.AnythingOfType("peer.ID"), - mock.AnythingOfType("*network.BlockRequestMessage")).Return(nil, nil) - net.On("ReportPeer", mock.AnythingOfType("peerset.ReputationChange"), mock.AnythingOfType("peer.ID")) - - readyBlocks := newBlockQueue(maxResponseSize) - - cfg := &chainSyncConfig{ - bs: bs, - net: net, - readyBlocks: readyBlocks, - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: defaultMinPeers, - maxPeers: defaultMaxPeers, - slotDuration: defaultSlotDuration, - } - - cs := newChainSync(cfg) - return cs, readyBlocks -} - -func TestChainSync_SetPeerHead(t *testing.T) { - cs, _ := newTestChainSync(t) - - testPeer := peer.ID("noot") - hash := common.Hash{0xa, 0xb} - const number = 1000 - err := cs.setPeerHead(testPeer, hash, number) - require.NoError(t, err) - - expected := &peerState{ - who: testPeer, - hash: hash, - number: number, - } - require.Equal(t, expected, cs.peerState[testPeer]) - require.Equal(t, expected, <-cs.workQueue) - require.True(t, cs.pendingBlocks.hasBlock(hash)) - - // test case where peer has a lower head than us, but they are on the same chain as us - cs.blockState = new(syncmocks.BlockState) - header, err := types.NewHeader(common.NewHash([]byte{0}), 
- trie.EmptyHash, trie.EmptyHash, number, types.NewDigest()) - require.NoError(t, err) - cs.blockState.(*syncmocks.BlockState).On("BestBlockHeader").Return(header, nil) - fin, err := types.NewHeader(common.NewHash([]byte{0}), - trie.EmptyHash, trie.EmptyHash, number-2, types.NewDigest()) - require.NoError(t, err) - cs.blockState.(*syncmocks.BlockState).On("GetHighestFinalisedHeader").Return(fin, nil) - cs.blockState.(*syncmocks.BlockState).On("GetHashByNumber", mock.AnythingOfType("uint")).Return(hash, nil) - - err = cs.setPeerHead(testPeer, hash, number-1) - require.NoError(t, err) - expected = &peerState{ - who: testPeer, - hash: hash, - number: number - 1, - } - require.Equal(t, expected, cs.peerState[testPeer]) - select { - case <-cs.workQueue: - t.Fatal("should not put chain we already have into work queue") - default: - } - - // test case where peer has a lower head than us, and they are on an invalid fork - cs.blockState = new(syncmocks.BlockState) - cs.blockState.(*syncmocks.BlockState).On("BestBlockHeader").Return(header, nil) - fin, err = types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, number, types.NewDigest()) +func TestChainSync_sync_bootstrap_withWorkerError_Integration(t *testing.T) { + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + mockBlockState := NewMockBlockState(ctrl) + mockHeader, err := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, + types.NewDigest()) require.NoError(t, err) - cs.blockState.(*syncmocks.BlockState).On("GetHighestFinalisedHeader").Return(fin, nil) - cs.blockState.(*syncmocks.BlockState).On("GetHashByNumber", mock.AnythingOfType("uint")).Return(common.Hash{}, nil) - - err = cs.setPeerHead(testPeer, hash, number-1) - require.True(t, errors.Is(err, errPeerOnInvalidFork)) - expected = &peerState{ - who: testPeer, - hash: hash, - number: number - 1, - } - require.Equal(t, expected, cs.peerState[testPeer]) - select { - case <-cs.workQueue: - t.Fatal("should not put invalid fork into work queue") - default: - } - - // test case where peer has a lower head than us, but they are on a valid fork (that is not our chain) - cs.blockState = new(syncmocks.BlockState) - cs.blockState.(*syncmocks.BlockState).On("BestBlockHeader").Return(header, nil) - fin, err = types.NewHeader( - common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, - number-2, types.NewDigest()) - require.NoError(t, err) - cs.blockState.(*syncmocks.BlockState).On("GetHighestFinalisedHeader").Return(fin, nil) - cs.blockState.(*syncmocks.BlockState).On("GetHashByNumber", mock.AnythingOfType("uint")).Return(common.Hash{}, nil) - cs.blockState.(*syncmocks.BlockState).On("HasHeader", mock.AnythingOfType("common.Hash")).Return(true, nil) - - err = cs.setPeerHead(testPeer, hash, number-1) - require.NoError(t, err) - expected = &peerState{ - who: testPeer, - hash: hash, - number: number - 1, - } - require.Equal(t, expected, cs.peerState[testPeer]) - select { - case <-cs.workQueue: - t.Fatal("should not put fork we already have into work queue") - default: - } -} - -func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { - cs, _ := newTestChainSync(t) + mockBlockState.EXPECT().BestBlockHeader().Return(mockHeader, nil).Times(2) + cs.blockState = mockBlockState + cs.handler = newBootstrapSyncer(mockBlockState) + + mockNetwork := NewMockNetwork(ctrl) + startingBlock := variadic.MustNewUint32OrHash(1) + max := uint32(128) + mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ + 
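+		// RequestedData 19 = header (1) + body (2) + justification (16),
+		// matching the bootstrapRequestData bitmask used by this package.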
RequestedData: 19, + StartingBlock: *startingBlock, + EndBlockHash: nil, + Direction: 0, + Max: &max, + }) + cs.network = mockNetwork go cs.sync() defer cs.cancel() @@ -177,21 +65,22 @@ func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { who: testPeer, } require.Equal(t, expected, res.err) - case <-time.After(testTimeout): + case <-time.After(5 * time.Second): t.Fatal("did not get worker response") } require.Equal(t, bootstrap, cs.state) } -func TestChainSync_sync_tip(t *testing.T) { - cs, _ := newTestChainSync(t) - cs.blockState = new(syncmocks.BlockState) - header, err := types.NewHeader(common.NewHash([]byte{0}), - trie.EmptyHash, trie.EmptyHash, 1000, types.NewDigest()) +func TestChainSync_sync_tip_Integration(t *testing.T) { + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + cs.blockState = new(mocks.BlockState) + header, err := types.NewHeader(common.Hash{0}, trie.EmptyHash, trie.EmptyHash, 1000, + types.NewDigest()) require.NoError(t, err) - cs.blockState.(*syncmocks.BlockState).On("BestBlockHeader").Return(header, nil) - cs.blockState.(*syncmocks.BlockState).On("GetHighestFinalisedHeader").Return(header, nil) + cs.blockState.(*mocks.BlockState).On("BestBlockHeader").Return(header, nil) + cs.blockState.(*mocks.BlockState).On("GetHighestFinalisedHeader").Return(header, nil) go cs.sync() defer cs.cancel() @@ -206,48 +95,7 @@ func TestChainSync_sync_tip(t *testing.T) { require.Equal(t, tip, cs.state) } -func TestChainSync_getTarget(t *testing.T) { - cs, _ := newTestChainSync(t) - - cs.peerState = map[peer.ID]*peerState{ - "a": { - number: 0, // outlier - }, - "b": { - number: 110, - }, - "c": { - number: 120, - }, - "d": { - number: 130, - }, - "e": { - number: 140, - }, - "f": { - number: 150, - }, - "g": { - number: 1000, // outlier - }, - } - - require.Equal(t, uint(130), cs.getTarget()) // sum:650/count:5 = avg:130 - - cs.peerState = map[peer.ID]*peerState{ - "testA": { - number: 1000, - }, - "testB": { - number: 2000, - }, - } - - require.Equal(t, uint(1500), cs.getTarget()) -} - -func TestWorkerToRequests(t *testing.T) { +func TestWorkerToRequests_Integration(t *testing.T) { w := &worker{ startNumber: uintPtr(10), targetNumber: uintPtr(1), @@ -455,7 +303,15 @@ func TestWorkerToRequests(t *testing.T) { } func TestValidateBlockData(t *testing.T) { - cs, _ := newTestChainSync(t) + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + mockNetwork := NewMockNetwork(ctrl) + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: -1048576, + Reason: "Incomplete header", + }, peer.ID("")) + cs.network = mockNetwork + req := &network.BlockRequestMessage{ RequestedData: bootstrapRequestData, } @@ -478,8 +334,13 @@ func TestValidateBlockData(t *testing.T) { require.NoError(t, err) } -func TestChainSync_validateResponse(t *testing.T) { - cs, _ := newTestChainSync(t) +func TestChainSync_validateResponse_Integration(t *testing.T) { + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil).Times(4) + cs.blockState = mockBlockState + err := cs.validateResponse(nil, nil, "") require.Equal(t, errEmptyBlockData, err) @@ -588,9 +449,10 @@ func TestChainSync_validateResponse(t *testing.T) { require.False(t, cs.pendingBlocks.hasBlock(hash)) } -func TestChainSync_validateResponse_firstBlock(t *testing.T) { - cs, _ := newTestChainSync(t) - bs := new(syncmocks.BlockState) +func TestChainSync_validateResponse_firstBlock_Integration(t 
*testing.T) { + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + bs := new(mocks.BlockState) bs.On("HasHeader", mock.AnythingOfType("common.Hash")).Return(false, nil) cs.blockState = bs @@ -624,9 +486,11 @@ func TestChainSync_validateResponse_firstBlock(t *testing.T) { require.NotNil(t, bd.justification) } -func TestChainSync_doSync(t *testing.T) { - cs, readyBlocks := newTestChainSync(t) +func TestChainSync_doSync_Integration(t *testing.T) { + ctrl := gomock.NewController(t) + readyBlocks := newBlockQueue(maxResponseSize) + cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) max := uint32(1) req := &network.BlockRequestMessage{ RequestedData: bootstrapRequestData, @@ -635,6 +499,9 @@ func TestChainSync_doSync(t *testing.T) { Direction: network.Ascending, Max: &max, } + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil).Times(2) + cs.blockState = mockBlockState workerErr := cs.doSync(req, make(map[peer.ID]struct{})) require.NotNil(t, workerErr) @@ -644,6 +511,18 @@ func TestChainSync_doSync(t *testing.T) { number: 100, } + mockNetwork := NewMockNetwork(ctrl) + startingBlock := variadic.MustNewUint32OrHash(1) + max1 := uint32(1) + mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ + RequestedData: 19, + StartingBlock: *startingBlock, + EndBlockHash: nil, + Direction: 0, + Max: &max1, + }) + cs.network = mockNetwork + workerErr = cs.doSync(req, make(map[peer.ID]struct{})) require.NotNil(t, workerErr) require.Equal(t, errNilResponse, workerErr.err) @@ -660,9 +539,8 @@ func TestChainSync_doSync(t *testing.T) { }, } - cs.network = new(syncmocks.Network) - cs.network.(*syncmocks.Network).On("DoBlockRequest", - mock.AnythingOfType("peer.ID"), + cs.network = new(mocks.Network) + cs.network.(*mocks.Network).On("DoBlockRequest", mock.AnythingOfType("peer.ID"), mock.AnythingOfType("*network.BlockRequestMessage")).Return(resp, nil) workerErr = cs.doSync(req, make(map[peer.ID]struct{})) @@ -696,9 +574,8 @@ func TestChainSync_doSync(t *testing.T) { // test to see if descending blocks get reversed req.Direction = network.Descending - cs.network = new(syncmocks.Network) - cs.network.(*syncmocks.Network).On("DoBlockRequest", - mock.AnythingOfType("peer.ID"), + cs.network = new(mocks.Network) + cs.network.(*mocks.Network).On("DoBlockRequest", mock.AnythingOfType("peer.ID"), mock.AnythingOfType("*network.BlockRequestMessage")).Return(resp, nil) workerErr = cs.doSync(req, make(map[peer.ID]struct{})) require.Nil(t, workerErr) @@ -712,8 +589,10 @@ func TestChainSync_doSync(t *testing.T) { require.Equal(t, resp.BlockData[1], bd) } -func TestHandleReadyBlock(t *testing.T) { - cs, readyBlocks := newTestChainSync(t) +func TestHandleReadyBlock_Integration(t *testing.T) { + ctrl := gomock.NewController(t) + readyBlocks := newBlockQueue(maxResponseSize) + cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) // test that descendant chain gets returned by getReadyDescendants on block 1 being ready header1 := &types.Header{ @@ -767,8 +646,9 @@ func TestHandleReadyBlock(t *testing.T) { require.Equal(t, block3.ToBlockData(), readyBlocks.pop(ctx)) } -func TestChainSync_determineSyncPeers(t *testing.T) { - cs, _ := newTestChainSync(t) +func TestChainSync_determineSyncPeers_Integration(t *testing.T) { + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) req := &network.BlockRequestMessage{} testPeerA := peer.ID("a") @@ -817,89 +697,3 @@ func TestChainSync_determineSyncPeers(t *testing.T) { 
require.Equal(t, 1, len(peers)) require.Equal(t, []peer.ID{testPeerB}, peers) } - -func TestChainSync_highestBlock(t *testing.T) { - type input struct { - peerState map[peer.ID]*peerState - } - type output struct { - highestBlock uint - err error - } - type test struct { - name string - in input - out output - } - tests := []test{ - { - name: "when has an empty map should return 0, errNoPeers", - in: input{ - peerState: map[peer.ID]*peerState{}, - }, - out: output{ - highestBlock: 0, - err: errNoPeers, - }, - }, - { - name: "when has a nil map should return 0, errNoPeers", - in: input{ - peerState: nil, - }, - out: output{ - highestBlock: 0, - err: errNoPeers, - }, - }, - { - name: "when has only one peer with number 90 should return 90, nil", - in: input{ - peerState: map[peer.ID]*peerState{ - "idtest": {number: 90}, - }, - }, - out: output{ - highestBlock: 90, - err: nil, - }, - }, - { - name: "when has two peers (p1, p2) with p1.number 90 and p2.number 190 should return 190, nil", - in: input{ - peerState: map[peer.ID]*peerState{ - "idtest#1": {number: 90}, - "idtest#2": {number: 190}, - }, - }, - out: output{ - highestBlock: 190, - err: nil, - }, - }, - { - name: "when has two peers (p1, p2) with p1.number 190 and p2.number 90 should return 190, nil", - in: input{ - peerState: map[peer.ID]*peerState{ - "idtest#1": {number: 190}, - "idtest#2": {number: 90}, - }, - }, - out: output{ - highestBlock: 190, - err: nil, - }, - }, - } - - for _, ts := range tests { - t.Run(ts.name, func(t *testing.T) { - cs, _ := newTestChainSync(t) - cs.peerState = ts.in.peerState - - highestBlock, err := cs.getHighestBlock() - require.ErrorIs(t, err, ts.out.err) - require.Equal(t, highestBlock, ts.out.highestBlock) - }) - } -} diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go new file mode 100644 index 0000000000..ae0e027692 --- /dev/null +++ b/dot/sync/chain_sync_test.go @@ -0,0 +1,1561 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/sync/mocks" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/ChainSafe/gossamer/lib/trie" + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +const defaultSlotDuration = 6 * time.Second + +func Test_chainSyncState_String(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + s chainSyncState + want string + }{ + { + name: "case bootstrap", + s: bootstrap, + want: "bootstrap", + }, + { + name: "case tip", + s: tip, + want: "tip", + }, + { + name: "case unknown", + s: 3, + want: "unknown", + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got := tt.s.String() + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_chainSync_setPeerHead(t *testing.T) { + t.Parallel() + + errTest := errors.New("test error") + const somePeer = peer.ID("abc") + someHash := common.Hash{1, 2, 3, 4} + + testCases := map[string]struct { + chainSyncBuilder func(ctrl *gomock.Controller) *chainSync + peerID peer.ID + hash common.Hash + number uint + errWrapped error + errMessage string + expectedPeerIDToPeerState 
map[peer.ID]*peerState
+ expectedQueuedPeerStates []*peerState
+ }{
+ "best block header error": {
+ chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+ blockState := NewMockBlockState(ctrl)
+ blockState.EXPECT().BestBlockHeader().Return(nil, errTest)
+ return &chainSync{
+ peerState: map[peer.ID]*peerState{},
+ blockState: blockState,
+ }
+ },
+ peerID: somePeer,
+ hash: someHash,
+ number: 1,
+ errWrapped: errTest,
+ errMessage: "best block header: test error",
+ expectedPeerIDToPeerState: map[peer.ID]*peerState{
+ somePeer: {
+ who: somePeer,
+ hash: someHash,
+ number: 1,
+ },
+ },
+ },
+ "number smaller than best block number get hash by number error": {
+ chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+ blockState := NewMockBlockState(ctrl)
+ bestBlockHeader := &types.Header{Number: 2}
+ blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+ blockState.EXPECT().GetHashByNumber(uint(1)).
+ Return(common.Hash{}, errTest)
+ return &chainSync{
+ peerState: map[peer.ID]*peerState{},
+ blockState: blockState,
+ }
+ },
+ peerID: somePeer,
+ hash: someHash,
+ number: 1,
+ errWrapped: errTest,
+ errMessage: "get block hash by number: test error",
+ expectedPeerIDToPeerState: map[peer.ID]*peerState{
+ somePeer: {
+ who: somePeer,
+ hash: someHash,
+ number: 1,
+ },
+ },
+ },
+ "number smaller than best block number and same hash": {
+ chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+ blockState := NewMockBlockState(ctrl)
+ bestBlockHeader := &types.Header{Number: 2}
+ blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+ blockState.EXPECT().GetHashByNumber(uint(1)).Return(someHash, nil)
+ return &chainSync{
+ peerState: map[peer.ID]*peerState{},
+ blockState: blockState,
+ }
+ },
+ peerID: somePeer,
+ hash: someHash,
+ number: 1,
+ expectedPeerIDToPeerState: map[peer.ID]*peerState{
+ somePeer: {
+ who: somePeer,
+ hash: someHash,
+ number: 1,
+ },
+ },
+ },
+ "number smaller than best block number get highest finalised header error": {
+ chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+ blockState := NewMockBlockState(ctrl)
+ bestBlockHeader := &types.Header{Number: 2}
+ blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+ blockState.EXPECT().GetHashByNumber(uint(1)).
+ Return(common.Hash{2}, nil) // other hash than someHash
+ blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest)
+ return &chainSync{
+ peerState: map[peer.ID]*peerState{},
+ blockState: blockState,
+ }
+ },
+ peerID: somePeer,
+ hash: someHash,
+ number: 1,
+ errWrapped: errTest,
+ errMessage: "get highest finalised header: test error",
+ expectedPeerIDToPeerState: map[peer.ID]*peerState{
+ somePeer: {
+ who: somePeer,
+ hash: someHash,
+ number: 1,
+ },
+ },
+ },
+ "number smaller than best block number and finalised number equal to number": {
+ chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync {
+ blockState := NewMockBlockState(ctrl)
+ bestBlockHeader := &types.Header{Number: 2}
+ blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil)
+ blockState.EXPECT().GetHashByNumber(uint(1)). 
+ Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + network := NewMockNetwork(ctrl) + network.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, somePeer) + return &chainSync{ + peerState: map[peer.ID]*peerState{}, + blockState: blockState, + network: network, + } + }, + peerID: somePeer, + hash: someHash, + number: 1, + errWrapped: errPeerOnInvalidFork, + errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", + expectedPeerIDToPeerState: map[peer.ID]*peerState{ + somePeer: { + who: somePeer, + hash: someHash, + number: 1, + }, + }, + }, + "number smaller than best block number and finalised number bigger than number": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)). + Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + network := NewMockNetwork(ctrl) + network.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, somePeer) + return &chainSync{ + peerState: map[peer.ID]*peerState{}, + blockState: blockState, + network: network, + } + }, + peerID: somePeer, + hash: someHash, + number: 1, + errWrapped: errPeerOnInvalidFork, + errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", + expectedPeerIDToPeerState: map[peer.ID]*peerState{ + somePeer: { + who: somePeer, + hash: someHash, + number: 1, + }, + }, + }, + "number smaller than best block number and " + + "finalised number smaller than number and " + + "has header error": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 3} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(2)). + Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(someHash).Return(false, errTest) + return &chainSync{ + peerState: map[peer.ID]*peerState{}, + blockState: blockState, + } + }, + peerID: somePeer, + hash: someHash, + number: 2, + errWrapped: errTest, + errMessage: "has header: test error", + expectedPeerIDToPeerState: map[peer.ID]*peerState{ + somePeer: { + who: somePeer, + hash: someHash, + number: 2, + }, + }, + }, + "number smaller than best block number and " + + "finalised number smaller than number and " + + "has the hash": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 3} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(2)). 
+ Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(someHash).Return(true, nil) + return &chainSync{ + peerState: map[peer.ID]*peerState{}, + blockState: blockState, + } + }, + peerID: somePeer, + hash: someHash, + number: 2, + expectedPeerIDToPeerState: map[peer.ID]*peerState{ + somePeer: { + who: somePeer, + hash: someHash, + number: 2, + }, + }, + }, + "number bigger than the head number add hash and number error": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). + Return(errTest) + return &chainSync{ + peerState: map[peer.ID]*peerState{}, + blockState: blockState, + pendingBlocks: pendingBlocks, + } + }, + peerID: somePeer, + hash: someHash, + number: 2, + errWrapped: errTest, + errMessage: "add hash and number: test error", + expectedPeerIDToPeerState: map[peer.ID]*peerState{ + somePeer: { + who: somePeer, + hash: someHash, + number: 2, + }, + }, + }, + "number bigger than the head number success": { + chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). + Return(nil) + return &chainSync{ + peerState: map[peer.ID]*peerState{}, + blockState: blockState, + pendingBlocks: pendingBlocks, + // buffered of 1 so setPeerHead can write to it + // without a consumer of the channel on the other end. 
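+ // The loop at the end of this test drains the channel to
+ // verify the expectedQueuedPeerStates entries were enqueued.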
+ workQueue: make(chan *peerState, 1), + } + }, + peerID: somePeer, + hash: someHash, + number: 2, + expectedPeerIDToPeerState: map[peer.ID]*peerState{ + somePeer: { + who: somePeer, + hash: someHash, + number: 2, + }, + }, + expectedQueuedPeerStates: []*peerState{ + { + who: somePeer, + hash: someHash, + number: 2, + }, + }, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + chainSync := testCase.chainSyncBuilder(ctrl) + + err := chainSync.setPeerHead(testCase.peerID, testCase.hash, testCase.number) + + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + assert.Equal(t, testCase.expectedPeerIDToPeerState, chainSync.peerState) + + require.Equal(t, len(testCase.expectedQueuedPeerStates), len(chainSync.workQueue)) + for _, expectedPeerState := range testCase.expectedQueuedPeerStates { + peerState := <-chainSync.workQueue + assert.Equal(t, expectedPeerState, peerState) + } + }) + } +} + +func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + mockBlockState := NewMockBlockState(ctrl) + mockHeader, err := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, + types.NewDigest()) + require.NoError(t, err) + mockBlockState.EXPECT().BestBlockHeader().Return(mockHeader, nil).Times(2) + cs.blockState = mockBlockState + cs.handler = newBootstrapSyncer(mockBlockState) + + mockNetwork := NewMockNetwork(ctrl) + startingBlock := variadic.MustNewUint32OrHash(1) + max := uint32(128) + mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ + RequestedData: 19, + StartingBlock: *startingBlock, + EndBlockHash: nil, + Direction: 0, + Max: &max, + }) + cs.network = mockNetwork + + go cs.sync() + defer cs.cancel() + + testPeer := peer.ID("noot") + cs.peerState[testPeer] = &peerState{ + number: 1000, + } + + cs.workQueue <- cs.peerState[testPeer] + + select { + case res := <-cs.resultQueue: + expected := &workerError{ + err: errNilResponse, // since MockNetwork returns a nil response + who: testPeer, + } + require.Equal(t, expected, res.err) + case <-time.After(5 * time.Second): + t.Fatal("did not get worker response") + } + + require.Equal(t, bootstrap, cs.state) +} + +func TestChainSync_sync_tip(t *testing.T) { + t.Parallel() + + done := make(chan struct{}) + + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + cs.blockState = new(mocks.BlockState) + header, err := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 1000, + types.NewDigest()) + require.NoError(t, err) + cs.blockState.(*mocks.BlockState).On("BestBlockHeader").Return(header, nil) + cs.blockState.(*mocks.BlockState).On("GetHighestFinalisedHeader").Run(func(args mock.Arguments) { + close(done) + }).Return(header, nil) + + go cs.sync() + defer cs.cancel() + + testPeer := peer.ID("noot") + cs.peerState[testPeer] = &peerState{ + number: 999, + } + + cs.workQueue <- cs.peerState[testPeer] + <-done + require.Equal(t, tip, cs.state) +} + +func TestChainSync_getTarget(t *testing.T) { + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + require.Equal(t, uint(1<<32-1), cs.getTarget()) + cs.peerState = map[peer.ID]*peerState{ + "a": { + number: 0, // outlier + }, + "b": { + number: 110, + }, + "c": { + number: 120, + }, + "d": { + number: 130, + }, + "e": { + number: 140, + 
}, + "f": { + number: 150, + }, + "g": { + number: 1000, // outlier + }, + } + + require.Equal(t, uint(130), cs.getTarget()) // sum:650/count:5= avg:130 + + cs.peerState = map[peer.ID]*peerState{ + "testA": { + number: 1000, + }, + "testB": { + number: 2000, + }, + } + + require.Equal(t, uint(1500), cs.getTarget()) +} + +func TestWorkerToRequests(t *testing.T) { + t.Parallel() + + w := &worker{ + startNumber: uintPtr(10), + targetNumber: uintPtr(1), + direction: network.Ascending, + } + _, err := workerToRequests(w) + require.Equal(t, errInvalidDirection, err) + + type testCase struct { + w *worker + expected []*network.BlockRequestMessage + } + + var ( + max128 = uint32(128) + max9 = uint32(9) + max64 = uint32(64) + ) + + testCases := map[string]testCase{ + "test 0": { + w: &worker{ + startNumber: uintPtr(1), + targetNumber: uintPtr(1 + maxResponseSize), + direction: network.Ascending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(1), + EndBlockHash: nil, + Direction: network.Ascending, + Max: &max128, + }, + }, + }, + "test 1": { + w: &worker{ + startNumber: uintPtr(1), + targetNumber: uintPtr(1 + (maxResponseSize * 2)), + direction: network.Ascending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(1), + EndBlockHash: nil, + Direction: network.Ascending, + Max: &max128, + }, + { + RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, + StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), + EndBlockHash: nil, + Direction: network.Ascending, + Max: &max128, + }, + }, + }, + "test 2": { + w: &worker{ + startNumber: uintPtr(1), + targetNumber: uintPtr(10), + direction: network.Ascending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(1), + EndBlockHash: nil, + Direction: network.Ascending, + Max: &max128, + }, + }, + }, + "test 3": { + w: &worker{ + startNumber: uintPtr(10), + targetNumber: uintPtr(1), + direction: network.Descending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(10), + EndBlockHash: nil, + Direction: network.Descending, + Max: &max9, + }, + }, + }, + "test 4": { + w: &worker{ + startNumber: uintPtr(1), + targetNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), + direction: network.Ascending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(1), + EndBlockHash: nil, + Direction: network.Ascending, + Max: &max128, + }, + { + RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, + StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), + EndBlockHash: nil, + Direction: network.Ascending, + Max: &max128, + }, + }, + }, + "test 5": { + w: &worker{ + startNumber: uintPtr(1), + targetNumber: uintPtr(10), + targetHash: common.Hash{0xa}, + direction: network.Ascending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + 
StartingBlock: *variadic.MustNewUint32OrHash(1), + EndBlockHash: &(common.Hash{0xa}), + Direction: network.Ascending, + Max: &max128, + }, + }, + }, + "test 6": { + w: &worker{ + startNumber: uintPtr(1), + startHash: common.Hash{0xb}, + targetNumber: uintPtr(10), + targetHash: common.Hash{0xc}, + direction: network.Ascending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{0xb}), + EndBlockHash: &(common.Hash{0xc}), + Direction: network.Ascending, + Max: &max128, + }, + }, + }, + "test 7": { + w: &worker{ + startNumber: uintPtr(10), + targetNumber: uintPtr(10), + direction: network.Ascending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(10), + Direction: network.Ascending, + Max: &max128, + }, + }, + }, + "test 8": { + w: &worker{ + startNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), + targetNumber: uintPtr(1), + direction: network.Descending, + requestData: bootstrapRequestData, + }, + expected: []*network.BlockRequestMessage{ + { + RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, + StartingBlock: *variadic.MustNewUint32OrHash(1 + (maxResponseSize / 2)), + EndBlockHash: nil, + Direction: network.Descending, + Max: &max64, + }, + { + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize + (maxResponseSize / 2)), + EndBlockHash: nil, + Direction: network.Descending, + Max: &max128, + }, + }, + }, + } + + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + reqs, err := workerToRequests(tc.w) + require.NoError(t, err) + require.Equal(t, tc.expected, reqs) + }) + } +} + +func TestChainSync_validateResponse(t *testing.T) { + t.Parallel() + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + networkBuilder func(ctrl *gomock.Controller) Network + req *network.BlockRequestMessage + resp *network.BlockResponseMessage + expectedError error + }{ + "nil req, nil resp": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + return mockBlockState + }, + networkBuilder: func(ctrl *gomock.Controller) Network { + return NewMockNetwork(ctrl) + }, + expectedError: errEmptyBlockData, + }, + "handle error response is not chain, has header": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + return mockBlockState + }, + networkBuilder: func(ctrl *gomock.Controller) Network { + return NewMockNetwork(ctrl) + }, + req: &network.BlockRequestMessage{ + RequestedData: network.RequestedDataHeader, + }, + resp: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Header: &types.Header{ + Number: 1, + }, + Body: &types.Body{}, + }, + { + Header: &types.Header{ + Number: 2, + }, + Body: &types.Body{}, + }, + }, + }, + expectedError: errResponseIsNotChain, + }, + "handle justification-only request, unknown block": { + blockStateBuilder: func(ctrl 
*gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + return mockBlockState + }, + networkBuilder: func(ctrl *gomock.Controller) Network { + mockNetwork := NewMockNetwork(ctrl) + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadJustificationValue, + Reason: peerset.BadJustificationReason, + }, peer.ID("")) + return mockNetwork + }, + req: &network.BlockRequestMessage{ + RequestedData: network.RequestedDataJustification, + }, + resp: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Justification: &[]byte{0}, + }, + }, + }, + expectedError: errUnknownBlockForJustification, + }, + "handle error unknown parent": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + return mockBlockState + }, + networkBuilder: func(ctrl *gomock.Controller) Network { + return NewMockNetwork(ctrl) + }, + req: &network.BlockRequestMessage{ + RequestedData: network.RequestedDataHeader, + }, + resp: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Header: &types.Header{ + Number: 1, + }, + Body: &types.Body{}, + }, + { + Header: &types.Header{ + Number: 2, + }, + Body: &types.Body{}, + }, + }, + }, + expectedError: errUnknownParent, + }, + "no error": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) + return mockBlockState + }, + networkBuilder: func(ctrl *gomock.Controller) Network { + return NewMockNetwork(ctrl) + }, + req: &network.BlockRequestMessage{ + RequestedData: network.RequestedDataHeader, + }, + resp: &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Header: &types.Header{ + Number: 2, + }, + Body: &types.Body{}, + }, + { + Header: &types.Header{ + ParentHash: (&types.Header{ + Number: 2, + }).Hash(), + Number: 3, + }, + Body: &types.Body{}, + }, + }, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + cfg := &chainSyncConfig{ + bs: tt.blockStateBuilder(ctrl), + pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), + readyBlocks: newBlockQueue(maxResponseSize), + net: tt.networkBuilder(ctrl), + } + cs := newChainSync(cfg) + + err := cs.validateResponse(tt.req, tt.resp, "") + if tt.expectedError != nil { + assert.EqualError(t, err, tt.expectedError.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestChainSync_doSync(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + readyBlocks := newBlockQueue(maxResponseSize) + cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) + + max := uint32(1) + req := &network.BlockRequestMessage{ + RequestedData: bootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(1), + EndBlockHash: nil, + Direction: network.Ascending, + Max: &max, + } + + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil).Times(2) + cs.blockState = mockBlockState + + 
workerErr := cs.doSync(req, make(map[peer.ID]struct{})) + require.NotNil(t, workerErr) + require.Equal(t, errNoPeers, workerErr.err) + + cs.peerState["noot"] = &peerState{ + number: 100, + } + + mockNetwork := NewMockNetwork(ctrl) + startingBlock := variadic.MustNewUint32OrHash(1) + max1 := uint32(1) + mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ + RequestedData: 19, + StartingBlock: *startingBlock, + EndBlockHash: nil, + Direction: 0, + Max: &max1, + }) + cs.network = mockNetwork + + workerErr = cs.doSync(req, make(map[peer.ID]struct{})) + require.NotNil(t, workerErr) + require.Equal(t, errNilResponse, workerErr.err) + + resp := &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: common.Hash{0x1}, + Header: &types.Header{ + Number: 1, + }, + Body: &types.Body{}, + }, + }, + } + + mockNetwork = NewMockNetwork(ctrl) + mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ + RequestedData: 19, + StartingBlock: *startingBlock, + EndBlockHash: nil, + Direction: 0, + Max: &max1, + }).Return(resp, nil) + cs.network = mockNetwork + + workerErr = cs.doSync(req, make(map[peer.ID]struct{})) + require.Nil(t, workerErr) + bd := readyBlocks.pop(context.Background()) + require.NotNil(t, bd) + require.Equal(t, resp.BlockData[0], bd) + + parent := (&types.Header{ + Number: 2, + }).Hash() + resp = &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: common.Hash{0x3}, + Header: &types.Header{ + ParentHash: parent, + Number: 3, + }, + Body: &types.Body{}, + }, + { + Hash: common.Hash{0x2}, + Header: &types.Header{ + Number: 2, + }, + Body: &types.Body{}, + }, + }, + } + + // test to see if descending blocks get reversed + req.Direction = network.Descending + mockNetwork = NewMockNetwork(ctrl) + mockNetwork.EXPECT().DoBlockRequest(peer.ID("noot"), &network.BlockRequestMessage{ + RequestedData: 19, + StartingBlock: *startingBlock, + EndBlockHash: nil, + Direction: 1, + Max: &max1, + }).Return(resp, nil) + cs.network = mockNetwork + workerErr = cs.doSync(req, make(map[peer.ID]struct{})) + require.Nil(t, workerErr) + + bd = readyBlocks.pop(context.Background()) + require.NotNil(t, bd) + require.Equal(t, resp.BlockData[0], bd) + + bd = readyBlocks.pop(context.Background()) + require.NotNil(t, bd) + require.Equal(t, resp.BlockData[1], bd) +} + +func TestHandleReadyBlock(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + readyBlocks := newBlockQueue(maxResponseSize) + cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) + + // test that descendant chain gets returned by getReadyDescendants on block 1 being ready + header1 := &types.Header{ + Number: 1, + } + block1 := &types.Block{ + Header: *header1, + Body: types.Body{}, + } + + header2 := &types.Header{ + ParentHash: header1.Hash(), + Number: 2, + } + block2 := &types.Block{ + Header: *header2, + Body: types.Body{}, + } + cs.pendingBlocks.addBlock(block2) + + header3 := &types.Header{ + ParentHash: header2.Hash(), + Number: 3, + } + block3 := &types.Block{ + Header: *header3, + Body: types.Body{}, + } + cs.pendingBlocks.addBlock(block3) + + header2NotDescendant := &types.Header{ + ParentHash: common.Hash{0xff}, + Number: 2, + } + block2NotDescendant := &types.Block{ + Header: *header2NotDescendant, + Body: types.Body{}, + } + cs.pendingBlocks.addBlock(block2NotDescendant) + + cs.handleReadyBlock(block1.ToBlockData()) + + require.False(t, cs.pendingBlocks.hasBlock(header1.Hash())) + require.False(t, 
cs.pendingBlocks.hasBlock(header2.Hash())) + require.False(t, cs.pendingBlocks.hasBlock(header3.Hash())) + require.True(t, cs.pendingBlocks.hasBlock(header2NotDescendant.Hash())) + + require.Equal(t, block1.ToBlockData(), readyBlocks.pop(context.Background())) + require.Equal(t, block2.ToBlockData(), readyBlocks.pop(context.Background())) + require.Equal(t, block3.ToBlockData(), readyBlocks.pop(context.Background())) +} + +func TestChainSync_determineSyncPeers(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + cs := newTestChainSync(ctrl) + + req := &network.BlockRequestMessage{} + testPeerA := peer.ID("a") + testPeerB := peer.ID("b") + peersTried := make(map[peer.ID]struct{}) + + // test base case + cs.peerState[testPeerA] = &peerState{ + number: 129, + } + cs.peerState[testPeerB] = &peerState{ + number: 257, + } + + peers := cs.determineSyncPeers(req, peersTried) + require.Equal(t, 2, len(peers)) + require.Contains(t, peers, testPeerA) + require.Contains(t, peers, testPeerB) + + // test peer ignored case + cs.ignorePeers[testPeerA] = struct{}{} + peers = cs.determineSyncPeers(req, peersTried) + require.Equal(t, 1, len(peers)) + require.Equal(t, []peer.ID{testPeerB}, peers) + + // test all peers ignored case + cs.ignorePeers[testPeerB] = struct{}{} + peers = cs.determineSyncPeers(req, peersTried) + require.Equal(t, 2, len(peers)) + require.Contains(t, peers, testPeerA) + require.Contains(t, peers, testPeerB) + require.Equal(t, 0, len(cs.ignorePeers)) + + // test peer's best block below number case, shouldn't include that peer + start, err := variadic.NewUint32OrHash(130) + require.NoError(t, err) + req.StartingBlock = *start + peers = cs.determineSyncPeers(req, peersTried) + require.Equal(t, 1, len(peers)) + require.Equal(t, []peer.ID{testPeerB}, peers) + + // test peer tried case, should ignore peer already tried + peersTried[testPeerA] = struct{}{} + req.StartingBlock = variadic.Uint32OrHash{} + peers = cs.determineSyncPeers(req, peersTried) + require.Equal(t, 1, len(peers)) + require.Equal(t, []peer.ID{testPeerB}, peers) +} + +func Test_chainSync_logSyncSpeed(t *testing.T) { + t.Parallel() + + type fields struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + networkBuilder func(ctrl *gomock.Controller, done chan struct{}) Network + state chainSyncState + benchmarker *syncBenchmarker + } + tests := []struct { + name string + fields fields + }{ + { + name: "state bootstrap", + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) + return mockBlockState + }, + networkBuilder: func(ctrl *gomock.Controller, done chan struct{}) Network { + mockNetwork := NewMockNetwork(ctrl) + mockNetwork.EXPECT().Peers().DoAndReturn(func() error { + close(done) + return nil + }) + return mockNetwork + }, + benchmarker: newSyncBenchmarker(10), + state: bootstrap, + }, + }, + { + name: "case tip", + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) + return mockBlockState + }, + networkBuilder: func(ctrl *gomock.Controller, done chan struct{}) Network { + mockNetwork := NewMockNetwork(ctrl) + 
mockNetwork.EXPECT().Peers().DoAndReturn(func() error { + close(done) + return nil + }) + return mockNetwork + }, + benchmarker: newSyncBenchmarker(10), + state: tip, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + cs := &chainSync{ + ctx: ctx, + cancel: cancel, + blockState: tt.fields.blockStateBuilder(ctrl), + network: tt.fields.networkBuilder(ctrl, done), + state: tt.fields.state, + benchmarker: tt.fields.benchmarker, + logSyncPeriod: time.Millisecond, + } + go cs.logSyncSpeed() + <-done + cancel() + }) + } +} + +func Test_chainSync_start(t *testing.T) { + t.Parallel() + + type fields struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + disjointBlockSetBuilder func(ctrl *gomock.Controller) DisjointBlockSet + networkBuilder func(ctrl *gomock.Controller, done chan struct{}) Network + benchmarker *syncBenchmarker + slotDuration time.Duration + } + tests := []struct { + name string + fields fields + }{ + { + name: "base case", + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() + return mockBlockState + }, + disjointBlockSetBuilder: func(ctrl *gomock.Controller) DisjointBlockSet { + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + mockDisjointBlockSet.EXPECT().run(gomock.Any()) + return mockDisjointBlockSet + }, + networkBuilder: func(ctrl *gomock.Controller, done chan struct{}) Network { + mockNetwork := NewMockNetwork(ctrl) + mockNetwork.EXPECT().Peers().DoAndReturn(func() []common.PeerInfo { + close(done) + return nil + }) + return mockNetwork + }, + slotDuration: defaultSlotDuration, + benchmarker: newSyncBenchmarker(1), + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + cs := &chainSync{ + ctx: ctx, + cancel: cancel, + blockState: tt.fields.blockStateBuilder(ctrl), + pendingBlocks: tt.fields.disjointBlockSetBuilder(ctrl), + network: tt.fields.networkBuilder(ctrl, done), + benchmarker: tt.fields.benchmarker, + slotDuration: tt.fields.slotDuration, + logSyncPeriod: time.Second, + } + cs.start() + <-done + cs.stop() + }) + } +} + +func Test_chainSync_setBlockAnnounce(t *testing.T) { + type args struct { + from peer.ID + header *types.Header + } + tests := map[string]struct { + chainSyncBuilder func(ctrl *gomock.Controller) chainSync + args args + wantErr error + }{ + "base case": { + args: args{ + header: &types.Header{Number: 2}, + }, + chainSyncBuilder: func(ctrl *gomock.Controller) chainSync { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasHeader(common.MustHexToHash( + "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")).Return(true, nil) + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + return chainSync{ + blockState: mockBlockState, + pendingBlocks: mockDisjointBlockSet, + } + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + ctrl := gomock.NewController(t) + sync := tt.chainSyncBuilder(ctrl) + err := 
sync.setBlockAnnounce(tt.args.from, tt.args.header) + assert.ErrorIs(t, err, tt.wantErr) + }) + } +} + +func Test_chainSync_getHighestBlock(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + peerState map[peer.ID]*peerState + wantHighestBlock uint + expectedError error + }{ + { + name: "error no peers", + expectedError: errors.New("no peers to sync with"), + }, + { + name: "base case", + peerState: map[peer.ID]*peerState{"1": {number: 2}}, + wantHighestBlock: 2, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + cs := &chainSync{ + peerState: tt.peerState, + } + gotHighestBlock, err := cs.getHighestBlock() + if tt.expectedError != nil { + assert.EqualError(t, err, tt.expectedError.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.wantHighestBlock, gotHighestBlock) + }) + } +} + +func Test_chainSync_handleResult(t *testing.T) { + t.Parallel() + mockError := errors.New("test mock error") + tests := map[string]struct { + chainSyncBuilder func(ctrl *gomock.Controller, result *worker) chainSync + maxWorkerRetries uint16 + res *worker + err error + }{ + "res.err == nil": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + return chainSync{ + workerState: newWorkerState(), + } + }, + res: &worker{}, + }, + "res.err.err.Error() == context.Canceled": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + return chainSync{ + workerState: newWorkerState(), + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: context.Canceled, + }, + }, + }, + "res.err.err.Error() == context.DeadlineExceeded": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + mockNetwork := NewMockNetwork(ctrl) + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -1024, Reason: "Request timeout"}, + peer.ID("")) + mockWorkHandler := NewMockworkHandler(ctrl) + mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) + return chainSync{ + workerState: newWorkerState(), + network: mockNetwork, + handler: mockWorkHandler, + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: context.DeadlineExceeded, + }, + }, + }, + "res.err.err.Error() dial backoff": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + return chainSync{ + workerState: newWorkerState(), + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: errors.New("dial backoff"), + }, + }, + }, + "res.err.err.Error() == errNoPeers": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + return chainSync{ + workerState: newWorkerState(), + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: errNoPeers, + }, + }, + }, + "res.err.err.Error() == protocol not supported": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + mockNetwork := NewMockNetwork(ctrl) + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -2147483648, + Reason: "Unsupported protocol"}, + peer.ID("")) + return chainSync{ + workerState: newWorkerState(), + network: mockNetwork, + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: errors.New("protocol not supported"), + }, + }, + }, + "no error, no retries": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + mockWorkHandler := NewMockworkHandler(ctrl) + 
mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) + return chainSync{ + workerState: newWorkerState(), + handler: mockWorkHandler, + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: errors.New(""), + }, + }, + }, + "handle work result error, no retries": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + mockWorkHandler := NewMockworkHandler(ctrl) + mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, mockError) + return chainSync{ + workerState: newWorkerState(), + handler: mockWorkHandler, + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: errors.New(""), + }, + }, + err: mockError, + }, + "handle work result nil, no retries": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + mockWorkHandler := NewMockworkHandler(ctrl) + mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, nil) + return chainSync{ + workerState: newWorkerState(), + handler: mockWorkHandler, + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: errors.New(""), + }, + }, + }, + "no error, maxWorkerRetries 2": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + mockWorkHandler := NewMockworkHandler(ctrl) + mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) + return chainSync{ + workerState: newWorkerState(), + handler: mockWorkHandler, + pendingBlocks: mockDisjointBlockSet, + } + }, + maxWorkerRetries: 2, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: errors.New(""), + }, + pendingBlock: newPendingBlock(common.Hash{}, 1, nil, nil, time.Now()), + }, + }, + "no error": { + chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { + mockWorkHandler := NewMockworkHandler(ctrl) + mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) + mockWorkHandler.EXPECT().hasCurrentWorker(&worker{ + ctx: context.Background(), + err: &workerError{ + err: mockError, + }, + retryCount: 1, + peersTried: map[peer.ID]struct{}{ + "": {}, + }, + }, newWorkerState().workers).Return(true) + return chainSync{ + workerState: newWorkerState(), + handler: mockWorkHandler, + maxWorkerRetries: 2, + } + }, + res: &worker{ + ctx: context.Background(), + err: &workerError{ + err: mockError, + }, + }, + }, + } + for testName, tt := range tests { + tt := tt + t.Run(testName, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + sync := tt.chainSyncBuilder(ctrl, tt.res) + err := sync.handleResult(tt.res) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller, readyBlocks *blockQueue) *chainSync { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + + cfg := &chainSyncConfig{ + bs: mockBlockState, + readyBlocks: readyBlocks, + pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), + minPeers: 1, + maxPeers: 5, + slotDuration: defaultSlotDuration, + } + + return newChainSync(cfg) +} + +func newTestChainSync(ctrl *gomock.Controller) *chainSync { + readyBlocks := newBlockQueue(maxResponseSize) + return newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) +} diff --git a/dot/sync/disjoint_block_set.go 
b/dot/sync/disjoint_block_set.go index 71f65a78ec..78dbde3abe 100644 --- a/dot/sync/disjoint_block_set.go +++ b/dot/sync/disjoint_block_set.go @@ -23,6 +23,8 @@ var ( errSetAtLimit = errors.New("cannot add block; set is at capacity") ) +//go:generate mockgen -destination=mock_disjoint_block_set_test.go -package=$GOPACKAGE . DisjointBlockSet + // DisjointBlockSet represents a set of incomplete blocks, or blocks // with an unknown parent. it is implemented by *disjointBlockSet type DisjointBlockSet interface { diff --git a/dot/sync/disjoint_block_set_integeration_test.go b/dot/sync/disjoint_block_set_integeration_test.go index afcd7458b3..28d3924875 100644 --- a/dot/sync/disjoint_block_set_integeration_test.go +++ b/dot/sync/disjoint_block_set_integeration_test.go @@ -12,15 +12,12 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" - + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDisjointBlockSet(t *testing.T) { s := newDisjointBlockSet(pendingBlocksLimit) - s.timeNow = func() time.Time { - return time.Time{} - } hash := common.Hash{0xa, 0xb} const number uint = 100 @@ -29,12 +26,13 @@ func TestDisjointBlockSet(t *testing.T) { require.Equal(t, 1, s.size()) expected := &pendingBlock{ - hash: hash, - number: number, - clearAt: time.Time{}.Add(ttl), + hash: hash, + number: number, } blocks := s.getBlocks() require.Equal(t, 1, len(blocks)) + assert.Greater(t, blocks[0].clearAt, time.Now().Add(ttl-time.Minute)) + blocks[0].clearAt = time.Time{} require.Equal(t, expected, blocks[0]) header := &types.Header{ @@ -43,14 +41,15 @@ func TestDisjointBlockSet(t *testing.T) { s.addHeader(header) require.True(t, s.hasBlock(header.Hash())) require.Equal(t, 2, s.size()) - expected = &pendingBlock{ - hash: header.Hash(), - number: header.Number, - header: header, - clearAt: time.Time{}.Add(ttl), + hash: header.Hash(), + number: header.Number, + header: header, } - require.Equal(t, expected, s.getBlock(header.Hash())) + block1 := s.getBlock(header.Hash()) + assert.Greater(t, block1.clearAt, time.Now().Add(ttl-time.Minute)) + block1.clearAt = time.Time{} + require.Equal(t, expected, block1) header2 := &types.Header{ Number: 999, @@ -60,12 +59,14 @@ func TestDisjointBlockSet(t *testing.T) { s.addHeader(header2) require.Equal(t, 3, s.size()) expected = &pendingBlock{ - hash: header2.Hash(), - number: header2.Number, - header: header2, - clearAt: time.Time{}.Add(ttl), + hash: header2.Hash(), + number: header2.Number, + header: header2, } - require.Equal(t, expected, s.getBlock(header2.Hash())) + block2 := s.getBlock(header2.Hash()) + assert.Greater(t, block2.clearAt, time.Now().Add(ttl-time.Minute)) + block2.clearAt = time.Time{} + require.Equal(t, expected, block2) block := &types.Block{ Header: *header2, @@ -74,13 +75,15 @@ func TestDisjointBlockSet(t *testing.T) { s.addBlock(block) require.Equal(t, 3, s.size()) expected = &pendingBlock{ - hash: header2.Hash(), - number: header2.Number, - header: header2, - body: &block.Body, - clearAt: time.Time{}.Add(ttl), - } - require.Equal(t, expected, s.getBlock(header2.Hash())) + hash: header2.Hash(), + number: header2.Number, + header: header2, + body: &block.Body, + } + block3 := s.getBlock(header2.Hash()) + assert.Greater(t, block3.clearAt, time.Now().Add(ttl-time.Minute)) + block3.clearAt = time.Time{} + require.Equal(t, expected, block3) s.removeBlock(hash) require.Equal(t, 2, s.size()) diff --git a/dot/sync/disjoint_block_set_test.go b/dot/sync/disjoint_block_set_test.go new file 
mode 100644 index 0000000000..d954e4b0a7 --- /dev/null +++ b/dot/sync/disjoint_block_set_test.go @@ -0,0 +1,484 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "errors" + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/stretchr/testify/assert" +) + +func Test_disjointBlockSet_addBlock(t *testing.T) { + t.Parallel() + + hashHeader := func(header types.Header) common.Hash { + return header.Hash() + } + setHashToHeader := func(header types.Header) *types.Header { + header.Hash() + return &header + } + + timeNow := func() time.Time { + return time.Unix(0, 0) + } + tests := map[string]struct { + disjointBlockSet *disjointBlockSet + block *types.Block + expectedDisjointBlockSet *disjointBlockSet + err error + }{ + "add block beyond capacity": { + disjointBlockSet: &disjointBlockSet{}, + block: &types.Block{ + Header: types.Header{ + Number: 1, + }, + }, + expectedDisjointBlockSet: &disjointBlockSet{}, + err: errSetAtLimit, + }, + "add block": { + disjointBlockSet: &disjointBlockSet{ + limit: 1, + blocks: make(map[common.Hash]*pendingBlock), + timeNow: timeNow, + parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), + }, + block: &types.Block{ + Header: types.Header{ + Number: 1, + ParentHash: common.Hash{1}, + }, + Body: []types.Extrinsic{[]byte{1}}, + }, + expectedDisjointBlockSet: &disjointBlockSet{ + limit: 1, + blocks: map[common.Hash]*pendingBlock{ + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { + hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + number: 1, + header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + body: &types.Body{{1}}, + clearAt: time.Unix(0, int64(ttl)), + }, + }, + parentToChildren: map[common.Hash]map[common.Hash]struct{}{ + {1}: { + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): {}, + }, + }, + }, + }, + "has block": { + disjointBlockSet: &disjointBlockSet{ + limit: 1, + blocks: map[common.Hash]*pendingBlock{ + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { + hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + number: 1, + header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + body: &types.Body{{1}}, + clearAt: time.Unix(0, int64(ttl)), + }, + }, + timeNow: timeNow, + parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), + }, + block: &types.Block{ + Header: types.Header{ + Number: 1, + ParentHash: common.Hash{1}, + }, + Body: []types.Extrinsic{[]byte{1}}, + }, + expectedDisjointBlockSet: &disjointBlockSet{ + limit: 1, + blocks: map[common.Hash]*pendingBlock{ + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { + hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + number: 1, + header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + body: &types.Body{{1}}, + justification: nil, + clearAt: time.Unix(0, int64(ttl)), + }, + }, + parentToChildren: map[common.Hash]map[common.Hash]struct{}{}, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + err := tt.disjointBlockSet.addBlock(tt.block) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + + tt.disjointBlockSet.timeNow = nil + assert.Equal(t, tt.expectedDisjointBlockSet, tt.disjointBlockSet) + }) + } +} + +func 
Test_disjointBlockSet_addHeader(t *testing.T) { + t.Parallel() + + hashHeader := func(header types.Header) common.Hash { + return header.Hash() + } + setHashToHeader := func(header types.Header) *types.Header { + header.Hash() + return &header + } + + tests := map[string]struct { + disjointBlockSet *disjointBlockSet + header *types.Header + expectedDisjointBlockSet *disjointBlockSet + err error + }{ + "add header beyond capacity": { + disjointBlockSet: &disjointBlockSet{}, + header: &types.Header{ + Number: 1, + }, + expectedDisjointBlockSet: &disjointBlockSet{}, + err: errors.New("cannot add block; set is at capacity"), + }, + "add header": { + disjointBlockSet: &disjointBlockSet{ + blocks: make(map[common.Hash]*pendingBlock), + limit: 1, + timeNow: func() time.Time { return time.Unix(0, 0) }, + parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), + }, + header: &types.Header{ + Number: 1, + ParentHash: common.Hash{1}, + }, + expectedDisjointBlockSet: &disjointBlockSet{ + limit: 1, + blocks: map[common.Hash]*pendingBlock{ + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { + hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + number: 1, + header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + clearAt: time.Unix(0, int64(ttl)), + }, + }, + parentToChildren: map[common.Hash]map[common.Hash]struct{}{ + {1}: { + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): {}, + }, + }, + }, + }, + "has header": { + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { + hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + number: 1, + header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + clearAt: time.Unix(0, int64(ttl)), + }, + }, + limit: 1, + timeNow: func() time.Time { return time.Unix(0, 0) }, + parentToChildren: make(map[common.Hash]map[common.Hash]struct{}), + }, + header: &types.Header{ + Number: 1, + ParentHash: common.Hash{1}, + }, + expectedDisjointBlockSet: &disjointBlockSet{ + limit: 1, + blocks: map[common.Hash]*pendingBlock{ + hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}): { + hash: hashHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + number: 1, + header: setHashToHeader(types.Header{Number: 1, ParentHash: common.Hash{1}}), + clearAt: time.Unix(0, int64(ttl)), + }, + }, + parentToChildren: map[common.Hash]map[common.Hash]struct{}{}, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + err := tt.disjointBlockSet.addHeader(tt.header) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + + tt.disjointBlockSet.timeNow = nil + assert.Equal(t, tt.expectedDisjointBlockSet, tt.disjointBlockSet) + }) + } +} + +func Test_disjointBlockSet_clearBlocks(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + disjointBlockSet *disjointBlockSet + remaining map[common.Hash]*pendingBlock + }{ + { + name: "base case", + disjointBlockSet: &disjointBlockSet{ + limit: 0, + blocks: map[common.Hash]*pendingBlock{ + {1}: { + clearAt: time.Unix(1000, 0), + hash: common.Hash{1}, + }, + }, + timeNow: func() time.Time { return time.Unix(1001, 0) }, + }, + remaining: map[common.Hash]*pendingBlock{}, + }, + { + name: "clear one block", + disjointBlockSet: &disjointBlockSet{ + limit: 0, + blocks: map[common.Hash]*pendingBlock{ + {1}: 
{ + clearAt: time.Unix(1000, 0), + hash: common.Hash{1}, + }, + {2}: { + clearAt: time.Unix(1002, 0), + hash: common.Hash{2}, + }, + }, + timeNow: func() time.Time { return time.Unix(1001, 0) }, + }, + remaining: map[common.Hash]*pendingBlock{ + {2}: { + clearAt: time.Unix(1002, 0), + hash: common.Hash{2}, + }, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tt.disjointBlockSet.clearBlocks() + assert.Equal(t, tt.remaining, tt.disjointBlockSet.blocks) + }) + } +} + +func Test_disjointBlockSet_getBlocks(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + disjointBlockSet *disjointBlockSet + want []*pendingBlock + wantDisjointBlockSet *disjointBlockSet + }{ + { + name: "no blocks", + disjointBlockSet: &disjointBlockSet{}, + want: []*pendingBlock{}, + wantDisjointBlockSet: &disjointBlockSet{}, + }, + { + name: "base case", + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {}: {}, + }, + }, + want: []*pendingBlock{{}}, + wantDisjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {}: {}, + }, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + blocks := tt.disjointBlockSet.getBlocks() + assert.Equal(t, tt.want, blocks) + assert.Equal(t, tt.wantDisjointBlockSet, tt.disjointBlockSet) + }) + } +} + +func Test_disjointBlockSet_removeLowerBlocks(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + disjointBlockSet *disjointBlockSet + num uint + remaining map[common.Hash]*pendingBlock + wantDisjointBlockSet *disjointBlockSet + }{ + { + name: "number 0", + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {1}: { + hash: common.Hash{1}, + number: 1, + }, + {10}: { + hash: common.Hash{10}, + number: 10, + }, + }, + }, + num: 0, + remaining: map[common.Hash]*pendingBlock{ + {1}: { + hash: common.Hash{1}, + number: 1, + }, + {10}: { + hash: common.Hash{10}, + number: 10, + }, + }, + wantDisjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {1}: { + hash: common.Hash{1}, + number: 1, + }, + {10}: { + hash: common.Hash{10}, + number: 10, + }, + }, + }, + }, + { + name: "number 1", + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {1}: { + hash: common.Hash{1}, + number: 1, + }, + {10}: { + hash: common.Hash{10}, + number: 10, + }, + }, + }, + num: 1, + remaining: map[common.Hash]*pendingBlock{{10}: { + hash: common.Hash{10}, + number: 10, + }, + }, + wantDisjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {10}: { + hash: common.Hash{10}, + number: 10, + }, + }, + }, + }, + { + name: "number 11", + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {1}: { + hash: common.Hash{1}, + number: 1, + }, + {10}: { + hash: common.Hash{10}, + number: 10, + }, + }, + }, + num: 11, + remaining: map[common.Hash]*pendingBlock{}, + wantDisjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{}, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + tt.disjointBlockSet.removeLowerBlocks(tt.num) + assert.Equal(t, tt.remaining, tt.disjointBlockSet.blocks) + assert.Equal(t, tt.wantDisjointBlockSet, tt.disjointBlockSet) + }) + } +} + +func Test_disjointBlockSet_size(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + disjointBlockSet *disjointBlockSet + want int + }{ + { + name: "expect 
0", + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{}, + }, + want: 0, + }, + { + name: "expect 1", + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {1}: {hash: common.Hash{1}, number: 1}, + }, + }, + want: 1, + }, + { + name: "expect 2", + disjointBlockSet: &disjointBlockSet{ + blocks: map[common.Hash]*pendingBlock{ + {1}: {hash: common.Hash{1}, number: 1}, + {10}: {hash: common.Hash{10}, number: 10}, + }, + }, + want: 2, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + size := tt.disjointBlockSet.size() + assert.Equal(t, tt.want, size) + }) + } +} diff --git a/dot/sync/interface.go b/dot/sync/interface.go index 7050aa827d..4922a02b12 100644 --- a/dot/sync/interface.go +++ b/dot/sync/interface.go @@ -15,7 +15,10 @@ import ( "github.com/libp2p/go-libp2p-core/peer" ) +// TODO: replace usage of mockery generated mocks with mockgen generated mocks. +// Note: This mockery go:generate is still being used //go:generate mockery --name BlockState --structname BlockState --case underscore --keeptree +//go:generate mockgen -destination=mock_interface_test.go -package=$GOPACKAGE . BlockState,StorageState,CodeSubstitutedState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network // BlockState is the interface for the block state type BlockState interface { @@ -66,29 +69,21 @@ type TransactionState interface { RemoveExtrinsic(ext types.Extrinsic) } -//go:generate mockery --name BabeVerifier --structname BabeVerifier --case underscore --keeptree - // BabeVerifier deals with BABE block verification type BabeVerifier interface { VerifyBlock(header *types.Header) error } -//go:generate mockery --name FinalityGadget --structname FinalityGadget --case underscore --keeptree - // FinalityGadget implements justification verification functionality type FinalityGadget interface { VerifyBlockJustification(common.Hash, []byte) error } -//go:generate mockery --name BlockImportHandler --structname BlockImportHandler --case underscore --keeptree - // BlockImportHandler is the interface for the handler of newly imported blocks type BlockImportHandler interface { HandleBlockImport(block *types.Block, state *rtstorage.TrieState) error } -//go:generate mockery --name Network --structname Network --case underscore --keeptree - // Network is the interface for the network type Network interface { // DoBlockRequest sends a request to the given peer. 
diff --git a/dot/sync/message_integeration_test.go b/dot/sync/message_integeration_test.go index ac0f34f096..3db0809306 100644 --- a/dot/sync/message_integeration_test.go +++ b/dot/sync/message_integeration_test.go @@ -364,7 +364,7 @@ func TestService_CreateBlockResponse_Descending_EndHash(t *testing.T) { require.Equal(t, uint(1), resp.BlockData[127].Number()) } -func TestService_checkOrGetDescendantHash(t *testing.T) { +func TestService_checkOrGetDescendantHash_integeration(t *testing.T) { t.Parallel() s := newTestSyncer(t) branches := map[uint]int{ diff --git a/dot/sync/message_test.go b/dot/sync/message_test.go new file mode 100644 index 0000000000..5853aa5e8a --- /dev/null +++ b/dot/sync/message_test.go @@ -0,0 +1,430 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "errors" + "testing" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestService_CreateBlockResponse(t *testing.T) { + t.Parallel() + + type args struct { + req *network.BlockRequestMessage + } + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + args args + want *network.BlockResponseMessage + err error + }{ + "invalid block request": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return nil + }, + args: args{req: &network.BlockRequestMessage{}}, + err: ErrInvalidBlockRequest, + }, + "ascending request nil startHash nil endHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil).Times(2) + mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{1, 2}, nil) + return mockBlockState + }, + args: args{req: &network.BlockRequestMessage{ + StartingBlock: *variadic.MustNewUint32OrHash(0), + Direction: network.Ascending, + }}, + want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "ascending request start number higher": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + return mockBlockState + }, + + args: args{req: &network.BlockRequestMessage{ + StartingBlock: *variadic.MustNewUint32OrHash(2), + Direction: network.Ascending, + }}, + err: errRequestStartTooHigh, + want: nil, + }, + "ascending request endHash not nil": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{1, 2}, nil) + mockBlockState.EXPECT().IsDescendantOf(common.Hash{1, 2}, common.Hash{1, 2, 3}).Return(true, + nil).Times(2) + mockBlockState.EXPECT().GetHeader(common.Hash{1, 2}).Return(&types.Header{ + Number: 1, + }, nil) + mockBlockState.EXPECT().GetHeader(common.Hash{1, 2, 3}).Return(&types.Header{ + Number: 2, + }, nil) + mockBlockState.EXPECT().SubChain(common.Hash{1, 2}, common.Hash{1, 2, 3}).Return([]common.Hash{{1, 2}}, + nil) + return mockBlockState + }, + args: args{req: &network.BlockRequestMessage{ + StartingBlock: *variadic.MustNewUint32OrHash(0), + EndBlockHash: &common.Hash{1, 2, 3}, + Direction: 
network.Ascending, + }}, + want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "descending request nil startHash nil endHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + return mockBlockState + }, + args: args{req: &network.BlockRequestMessage{ + StartingBlock: *variadic.MustNewUint32OrHash(0), + Direction: network.Descending, + }}, + want: &network.BlockResponseMessage{BlockData: []*types.BlockData{}}, + }, + "descending request start number higher": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{1, 2}, nil) + return mockBlockState + }, + args: args{req: &network.BlockRequestMessage{ + StartingBlock: *variadic.MustNewUint32OrHash(2), + Direction: network.Descending, + }}, + err: nil, + want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "descending request endHash not nil": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(1), nil) + mockBlockState.EXPECT().GetHashByNumber(uint(0)).Return(common.Hash{1, 2}, nil) + mockBlockState.EXPECT().IsDescendantOf(common.Hash{1, 2, 3}, common.Hash{1, 2}).Return(true, + nil) + mockBlockState.EXPECT().SubChain(common.Hash{1, 2, 3}, common.Hash{1, 2}).Return([]common.Hash{{1, + 2}}, + nil) + return mockBlockState + }, + args: args{req: &network.BlockRequestMessage{ + StartingBlock: *variadic.MustNewUint32OrHash(0), + EndBlockHash: &common.Hash{1, 2, 3}, + Direction: network.Descending, + }}, + want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "ascending request startHash nil endHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + Number: 1, + }, nil) + mockBlockState.EXPECT().BestBlockNumber().Return(uint(2), nil) + mockBlockState.EXPECT().GetHashByNumber(uint(2)).Return(common.Hash{1, 2, 3}, nil) + mockBlockState.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{1, 2, 3}).Return(true, + nil) + mockBlockState.EXPECT().SubChain(common.Hash{}, common.Hash{1, 2, 3}).Return([]common.Hash{{1, + 2}}, + nil) + return mockBlockState + }, + args: args{req: &network.BlockRequestMessage{ + StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{}), + Direction: network.Ascending, + }}, + want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "descending request startHash nil endHash": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ + Number: 1, + }, nil) + mockBlockState.EXPECT().GetHeaderByNumber(uint(1)).Return(&types.Header{ + Number: 1, + }, nil) + mockBlockState.EXPECT().SubChain(common.MustHexToHash( + "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), + common.Hash{}).Return([]common.Hash{{1, 2}}, nil) + return mockBlockState + }, + args: args{req: &network.BlockRequestMessage{ + StartingBlock: 
*variadic.MustNewUint32OrHash(common.Hash{}), + Direction: network.Descending, + }}, + want: &network.BlockResponseMessage{BlockData: []*types.BlockData{{ + Hash: common.Hash{1, 2}, + }}}, + }, + "invalid direction": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return nil + }, + args: args{req: &network.BlockRequestMessage{ + Direction: network.SyncDirection(3), + }}, + err: errInvalidRequestDirection, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &Service{ + blockState: tt.blockStateBuilder(ctrl), + } + got, err := s.CreateBlockResponse(tt.args.req) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestService_checkOrGetDescendantHash(t *testing.T) { + t.Parallel() + + type args struct { + ancestor common.Hash + descendant *common.Hash + descendantNumber uint + } + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + args args + want common.Hash + expectedError error + }{ + "nil descendant": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, nil) + mockBlockState.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{}).Return(true, nil) + return mockBlockState + }, + args: args{ancestor: common.Hash{}, descendant: nil, descendantNumber: 1}, + }, + "not nil descendant": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) + mockBlockState.EXPECT().IsDescendantOf(common.Hash{}, common.Hash{1, 2}).Return(true, nil) + return mockBlockState + }, + args: args{ancestor: common.Hash{0}, descendant: &common.Hash{1, 2}, descendantNumber: 1}, + want: common.Hash{1, 2}, + }, + "descendant greater than header": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{2}).Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + args: args{ancestor: common.Hash{2}, descendant: &common.Hash{1, 2}, descendantNumber: 1}, + want: common.Hash{}, + expectedError: errors.New("invalid request, descendant number 2 is higher than ancestor 1"), + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &Service{ + blockState: tt.blockStateBuilder(ctrl), + } + got, err := s.checkOrGetDescendantHash(tt.args.ancestor, tt.args.descendant, tt.args.descendantNumber) + if tt.expectedError != nil { + assert.EqualError(t, err, tt.expectedError.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestService_getBlockData(t *testing.T) { + t.Parallel() + + type args struct { + hash common.Hash + requestedData byte + } + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + args args + want *types.BlockData + err error + }{ + "requestedData 0": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return nil + }, + args: args{ + hash: common.Hash{}, + requestedData: 0, + }, + want: &types.BlockData{}, + }, + "requestedData RequestedDataHeader error": { + blockStateBuilder: func(ctrl 
*gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, errors.New("empty hash")) + return mockBlockState + }, + args: args{ + hash: common.Hash{0}, + requestedData: network.RequestedDataHeader, + }, + want: &types.BlockData{ + Hash: common.Hash{}, + }, + }, + "requestedData RequestedDataHeader": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHeader(common.Hash{1}).Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{1}, + requestedData: network.RequestedDataHeader, + }, + want: &types.BlockData{ + Hash: common.Hash{1}, + Header: &types.Header{ + Number: 2, + }, + }, + }, + "requestedData RequestedDataBody error": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetBlockBody(common.Hash{}).Return(nil, errors.New("empty hash")) + return mockBlockState + }, + + args: args{ + hash: common.Hash{}, + requestedData: network.RequestedDataBody, + }, + want: &types.BlockData{ + Hash: common.Hash{}, + }, + }, + "requestedData RequestedDataBody": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetBlockBody(common.Hash{1}).Return(&types.Body{[]byte{1}}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{1}, + requestedData: network.RequestedDataBody, + }, + want: &types.BlockData{ + Hash: common.Hash{1}, + Body: &types.Body{[]byte{1}}, + }, + }, + "requestedData RequestedDataReceipt": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetReceipt(common.Hash{1}).Return([]byte{1}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{1}, + requestedData: network.RequestedDataReceipt, + }, + want: &types.BlockData{ + Hash: common.Hash{1}, + Receipt: &[]byte{1}, + }, + }, + "requestedData RequestedDataMessageQueue": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetMessageQueue(common.Hash{2}).Return([]byte{2}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{2}, + requestedData: network.RequestedDataMessageQueue, + }, + want: &types.BlockData{ + Hash: common.Hash{2}, + MessageQueue: &[]byte{2}, + }, + }, + "requestedData RequestedDataJustification": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetJustification(common.Hash{3}).Return([]byte{3}, nil) + return mockBlockState + }, + args: args{ + hash: common.Hash{3}, + requestedData: network.RequestedDataJustification, + }, + want: &types.BlockData{ + Hash: common.Hash{3}, + Justification: &[]byte{3}, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &Service{ + blockState: tt.blockStateBuilder(ctrl), + } + got, err := s.getBlockData(tt.args.hash, tt.args.requestedData) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/dot/sync/mock_chain_processor_test.go b/dot/sync/mock_chain_processor_test.go new file mode 100644 index 0000000000..5eeaa0d450 
--- /dev/null +++ b/dot/sync/mock_chain_processor_test.go @@ -0,0 +1,58 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: ChainProcessor) + +// Package sync is a generated GoMock package. +package sync + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockChainProcessor is a mock of ChainProcessor interface. +type MockChainProcessor struct { + ctrl *gomock.Controller + recorder *MockChainProcessorMockRecorder +} + +// MockChainProcessorMockRecorder is the mock recorder for MockChainProcessor. +type MockChainProcessorMockRecorder struct { + mock *MockChainProcessor +} + +// NewMockChainProcessor creates a new mock instance. +func NewMockChainProcessor(ctrl *gomock.Controller) *MockChainProcessor { + mock := &MockChainProcessor{ctrl: ctrl} + mock.recorder = &MockChainProcessorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockChainProcessor) EXPECT() *MockChainProcessorMockRecorder { + return m.recorder +} + +// start mocks base method. +func (m *MockChainProcessor) start() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "start") +} + +// start indicates an expected call of start. +func (mr *MockChainProcessorMockRecorder) start() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "start", reflect.TypeOf((*MockChainProcessor)(nil).start)) +} + +// stop mocks base method. +func (m *MockChainProcessor) stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "stop") +} + +// stop indicates an expected call of stop. +func (mr *MockChainProcessorMockRecorder) stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "stop", reflect.TypeOf((*MockChainProcessor)(nil).stop)) +} diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index 43f15db810..cf4d4c3c71 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: ChainSync) +// Source: chain_sync.go // Package sync is a generated GoMock package. package sync @@ -13,6 +13,88 @@ import ( peer "github.com/libp2p/go-libp2p-core/peer" ) +// MockworkHandler is a mock of workHandler interface. +type MockworkHandler struct { + ctrl *gomock.Controller + recorder *MockworkHandlerMockRecorder +} + +// MockworkHandlerMockRecorder is the mock recorder for MockworkHandler. +type MockworkHandlerMockRecorder struct { + mock *MockworkHandler +} + +// NewMockworkHandler creates a new mock instance. +func NewMockworkHandler(ctrl *gomock.Controller) *MockworkHandler { + mock := &MockworkHandler{ctrl: ctrl} + mock.recorder = &MockworkHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockworkHandler) EXPECT() *MockworkHandlerMockRecorder { + return m.recorder +} + +// handleNewPeerState mocks base method. +func (m *MockworkHandler) handleNewPeerState(arg0 *peerState) (*worker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "handleNewPeerState", arg0) + ret0, _ := ret[0].(*worker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// handleNewPeerState indicates an expected call of handleNewPeerState. 
+func (mr *MockworkHandlerMockRecorder) handleNewPeerState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleNewPeerState", reflect.TypeOf((*MockworkHandler)(nil).handleNewPeerState), arg0) +} + +// handleTick mocks base method. +func (m *MockworkHandler) handleTick() ([]*worker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "handleTick") + ret0, _ := ret[0].([]*worker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// handleTick indicates an expected call of handleTick. +func (mr *MockworkHandlerMockRecorder) handleTick() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleTick", reflect.TypeOf((*MockworkHandler)(nil).handleTick)) +} + +// handleWorkerResult mocks base method. +func (m *MockworkHandler) handleWorkerResult(w *worker) (*worker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "handleWorkerResult", w) + ret0, _ := ret[0].(*worker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// handleWorkerResult indicates an expected call of handleWorkerResult. +func (mr *MockworkHandlerMockRecorder) handleWorkerResult(w interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleWorkerResult", reflect.TypeOf((*MockworkHandler)(nil).handleWorkerResult), w) +} + +// hasCurrentWorker mocks base method. +func (m *MockworkHandler) hasCurrentWorker(arg0 *worker, arg1 map[uint64]*worker) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "hasCurrentWorker", arg0, arg1) + ret0, _ := ret[0].(bool) + return ret0 +} + +// hasCurrentWorker indicates an expected call of hasCurrentWorker. +func (mr *MockworkHandlerMockRecorder) hasCurrentWorker(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasCurrentWorker", reflect.TypeOf((*MockworkHandler)(nil).hasCurrentWorker), arg0, arg1) +} + // MockChainSync is a mock of ChainSync interface. type MockChainSync struct { ctrl *gomock.Controller @@ -52,31 +134,31 @@ func (mr *MockChainSyncMockRecorder) getHighestBlock() *gomock.Call { } // setBlockAnnounce mocks base method. -func (m *MockChainSync) setBlockAnnounce(arg0 peer.ID, arg1 *types.Header) error { +func (m *MockChainSync) setBlockAnnounce(from peer.ID, header *types.Header) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "setBlockAnnounce", arg0, arg1) + ret := m.ctrl.Call(m, "setBlockAnnounce", from, header) ret0, _ := ret[0].(error) return ret0 } // setBlockAnnounce indicates an expected call of setBlockAnnounce. -func (mr *MockChainSyncMockRecorder) setBlockAnnounce(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainSyncMockRecorder) setBlockAnnounce(from, header interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).setBlockAnnounce), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).setBlockAnnounce), from, header) } // setPeerHead mocks base method. 
-func (m *MockChainSync) setPeerHead(arg0 peer.ID, arg1 common.Hash, arg2 uint) error { +func (m *MockChainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "setPeerHead", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "setPeerHead", p, hash, number) ret0, _ := ret[0].(error) return ret0 } // setPeerHead indicates an expected call of setPeerHead. -func (mr *MockChainSyncMockRecorder) setPeerHead(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainSyncMockRecorder) setPeerHead(p, hash, number interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setPeerHead", reflect.TypeOf((*MockChainSync)(nil).setPeerHead), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setPeerHead", reflect.TypeOf((*MockChainSync)(nil).setPeerHead), p, hash, number) } // start mocks base method. diff --git a/dot/sync/mock_disjoint_block_set_test.go b/dot/sync/mock_disjoint_block_set_test.go new file mode 100644 index 0000000000..48e791701e --- /dev/null +++ b/dot/sync/mock_disjoint_block_set_test.go @@ -0,0 +1,212 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: DisjointBlockSet) + +// Package sync is a generated GoMock package. +package sync + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + gomock "github.com/golang/mock/gomock" +) + +// MockDisjointBlockSet is a mock of DisjointBlockSet interface. +type MockDisjointBlockSet struct { + ctrl *gomock.Controller + recorder *MockDisjointBlockSetMockRecorder +} + +// MockDisjointBlockSetMockRecorder is the mock recorder for MockDisjointBlockSet. +type MockDisjointBlockSetMockRecorder struct { + mock *MockDisjointBlockSet +} + +// NewMockDisjointBlockSet creates a new mock instance. +func NewMockDisjointBlockSet(ctrl *gomock.Controller) *MockDisjointBlockSet { + mock := &MockDisjointBlockSet{ctrl: ctrl} + mock.recorder = &MockDisjointBlockSetMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDisjointBlockSet) EXPECT() *MockDisjointBlockSetMockRecorder { + return m.recorder +} + +// addBlock mocks base method. +func (m *MockDisjointBlockSet) addBlock(arg0 *types.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "addBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// addBlock indicates an expected call of addBlock. +func (mr *MockDisjointBlockSetMockRecorder) addBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).addBlock), arg0) +} + +// addHashAndNumber mocks base method. +func (m *MockDisjointBlockSet) addHashAndNumber(arg0 common.Hash, arg1 uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "addHashAndNumber", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// addHashAndNumber indicates an expected call of addHashAndNumber. +func (mr *MockDisjointBlockSetMockRecorder) addHashAndNumber(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addHashAndNumber", reflect.TypeOf((*MockDisjointBlockSet)(nil).addHashAndNumber), arg0, arg1) +} + +// addHeader mocks base method. 
+func (m *MockDisjointBlockSet) addHeader(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "addHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// addHeader indicates an expected call of addHeader. +func (mr *MockDisjointBlockSetMockRecorder) addHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addHeader", reflect.TypeOf((*MockDisjointBlockSet)(nil).addHeader), arg0) +} + +// addJustification mocks base method. +func (m *MockDisjointBlockSet) addJustification(arg0 common.Hash, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "addJustification", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// addJustification indicates an expected call of addJustification. +func (mr *MockDisjointBlockSetMockRecorder) addJustification(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addJustification", reflect.TypeOf((*MockDisjointBlockSet)(nil).addJustification), arg0, arg1) +} + +// getBlock mocks base method. +func (m *MockDisjointBlockSet) getBlock(arg0 common.Hash) *pendingBlock { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getBlock", arg0) + ret0, _ := ret[0].(*pendingBlock) + return ret0 +} + +// getBlock indicates an expected call of getBlock. +func (mr *MockDisjointBlockSetMockRecorder) getBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).getBlock), arg0) +} + +// getBlocks mocks base method. +func (m *MockDisjointBlockSet) getBlocks() []*pendingBlock { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getBlocks") + ret0, _ := ret[0].([]*pendingBlock) + return ret0 +} + +// getBlocks indicates an expected call of getBlocks. +func (mr *MockDisjointBlockSetMockRecorder) getBlocks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getBlocks", reflect.TypeOf((*MockDisjointBlockSet)(nil).getBlocks)) +} + +// getChildren mocks base method. +func (m *MockDisjointBlockSet) getChildren(arg0 common.Hash) map[common.Hash]struct{} { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getChildren", arg0) + ret0, _ := ret[0].(map[common.Hash]struct{}) + return ret0 +} + +// getChildren indicates an expected call of getChildren. +func (mr *MockDisjointBlockSetMockRecorder) getChildren(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getChildren", reflect.TypeOf((*MockDisjointBlockSet)(nil).getChildren), arg0) +} + +// getReadyDescendants mocks base method. +func (m *MockDisjointBlockSet) getReadyDescendants(arg0 common.Hash, arg1 []*types.BlockData) []*types.BlockData { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getReadyDescendants", arg0, arg1) + ret0, _ := ret[0].([]*types.BlockData) + return ret0 +} + +// getReadyDescendants indicates an expected call of getReadyDescendants. +func (mr *MockDisjointBlockSetMockRecorder) getReadyDescendants(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getReadyDescendants", reflect.TypeOf((*MockDisjointBlockSet)(nil).getReadyDescendants), arg0, arg1) +} + +// hasBlock mocks base method. 
+func (m *MockDisjointBlockSet) hasBlock(arg0 common.Hash) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "hasBlock", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// hasBlock indicates an expected call of hasBlock. +func (mr *MockDisjointBlockSetMockRecorder) hasBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).hasBlock), arg0) +} + +// removeBlock mocks base method. +func (m *MockDisjointBlockSet) removeBlock(arg0 common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "removeBlock", arg0) +} + +// removeBlock indicates an expected call of removeBlock. +func (mr *MockDisjointBlockSetMockRecorder) removeBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "removeBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).removeBlock), arg0) +} + +// removeLowerBlocks mocks base method. +func (m *MockDisjointBlockSet) removeLowerBlocks(arg0 uint) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "removeLowerBlocks", arg0) +} + +// removeLowerBlocks indicates an expected call of removeLowerBlocks. +func (mr *MockDisjointBlockSetMockRecorder) removeLowerBlocks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "removeLowerBlocks", reflect.TypeOf((*MockDisjointBlockSet)(nil).removeLowerBlocks), arg0) +} + +// run mocks base method. +func (m *MockDisjointBlockSet) run(arg0 <-chan struct{}) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "run", arg0) +} + +// run indicates an expected call of run. +func (mr *MockDisjointBlockSetMockRecorder) run(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0) +} + +// size mocks base method. +func (m *MockDisjointBlockSet) size() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "size") + ret0, _ := ret[0].(int) + return ret0 +} + +// size indicates an expected call of size. +func (mr *MockDisjointBlockSetMockRecorder) size() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "size", reflect.TypeOf((*MockDisjointBlockSet)(nil).size)) +} diff --git a/dot/sync/mock_instance_test.go b/dot/sync/mock_instance_test.go new file mode 100644 index 0000000000..ce3af4cb60 --- /dev/null +++ b/dot/sync/mock_instance_test.go @@ -0,0 +1,404 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/lib/runtime (interfaces: Instance) + +// Package sync is a generated GoMock package. +package sync + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + keystore "github.com/ChainSafe/gossamer/lib/keystore" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + transaction "github.com/ChainSafe/gossamer/lib/transaction" + gomock "github.com/golang/mock/gomock" +) + +// MockInstance is a mock of Instance interface. +type MockInstance struct { + ctrl *gomock.Controller + recorder *MockInstanceMockRecorder +} + +// MockInstanceMockRecorder is the mock recorder for MockInstance. +type MockInstanceMockRecorder struct { + mock *MockInstance +} + +// NewMockInstance creates a new mock instance. 
+func NewMockInstance(ctrl *gomock.Controller) *MockInstance { + mock := &MockInstance{ctrl: ctrl} + mock.recorder = &MockInstanceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInstance) EXPECT() *MockInstanceMockRecorder { + return m.recorder +} + +// ApplyExtrinsic mocks base method. +func (m *MockInstance) ApplyExtrinsic(arg0 types.Extrinsic) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyExtrinsic", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApplyExtrinsic indicates an expected call of ApplyExtrinsic. +func (mr *MockInstanceMockRecorder) ApplyExtrinsic(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyExtrinsic", reflect.TypeOf((*MockInstance)(nil).ApplyExtrinsic), arg0) +} + +// BabeConfiguration mocks base method. +func (m *MockInstance) BabeConfiguration() (*types.BabeConfiguration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BabeConfiguration") + ret0, _ := ret[0].(*types.BabeConfiguration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BabeConfiguration indicates an expected call of BabeConfiguration. +func (mr *MockInstanceMockRecorder) BabeConfiguration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeConfiguration", reflect.TypeOf((*MockInstance)(nil).BabeConfiguration)) +} + +// CheckInherents mocks base method. +func (m *MockInstance) CheckInherents() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CheckInherents") +} + +// CheckInherents indicates an expected call of CheckInherents. +func (mr *MockInstanceMockRecorder) CheckInherents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckInherents", reflect.TypeOf((*MockInstance)(nil).CheckInherents)) +} + +// CheckRuntimeVersion mocks base method. +func (m *MockInstance) CheckRuntimeVersion(arg0 []byte) (runtime.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckRuntimeVersion", arg0) + ret0, _ := ret[0].(runtime.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckRuntimeVersion indicates an expected call of CheckRuntimeVersion. +func (mr *MockInstanceMockRecorder) CheckRuntimeVersion(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckRuntimeVersion", reflect.TypeOf((*MockInstance)(nil).CheckRuntimeVersion), arg0) +} + +// DecodeSessionKeys mocks base method. +func (m *MockInstance) DecodeSessionKeys(arg0 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecodeSessionKeys", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DecodeSessionKeys indicates an expected call of DecodeSessionKeys. +func (mr *MockInstanceMockRecorder) DecodeSessionKeys(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeSessionKeys", reflect.TypeOf((*MockInstance)(nil).DecodeSessionKeys), arg0) +} + +// Exec mocks base method. +func (m *MockInstance) Exec(arg0 string, arg1 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exec", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Exec indicates an expected call of Exec. 
+func (mr *MockInstanceMockRecorder) Exec(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockInstance)(nil).Exec), arg0, arg1) +} + +// ExecuteBlock mocks base method. +func (m *MockInstance) ExecuteBlock(arg0 *types.Block) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteBlock", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteBlock indicates an expected call of ExecuteBlock. +func (mr *MockInstanceMockRecorder) ExecuteBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteBlock", reflect.TypeOf((*MockInstance)(nil).ExecuteBlock), arg0) +} + +// FinalizeBlock mocks base method. +func (m *MockInstance) FinalizeBlock() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FinalizeBlock") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FinalizeBlock indicates an expected call of FinalizeBlock. +func (mr *MockInstanceMockRecorder) FinalizeBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeBlock", reflect.TypeOf((*MockInstance)(nil).FinalizeBlock)) +} + +// GenerateSessionKeys mocks base method. +func (m *MockInstance) GenerateSessionKeys() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GenerateSessionKeys") +} + +// GenerateSessionKeys indicates an expected call of GenerateSessionKeys. +func (mr *MockInstanceMockRecorder) GenerateSessionKeys() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSessionKeys", reflect.TypeOf((*MockInstance)(nil).GenerateSessionKeys)) +} + +// GetCodeHash mocks base method. +func (m *MockInstance) GetCodeHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCodeHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// GetCodeHash indicates an expected call of GetCodeHash. +func (mr *MockInstanceMockRecorder) GetCodeHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCodeHash", reflect.TypeOf((*MockInstance)(nil).GetCodeHash)) +} + +// GrandpaAuthorities mocks base method. +func (m *MockInstance) GrandpaAuthorities() ([]types.Authority, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GrandpaAuthorities") + ret0, _ := ret[0].([]types.Authority) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GrandpaAuthorities indicates an expected call of GrandpaAuthorities. +func (mr *MockInstanceMockRecorder) GrandpaAuthorities() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaAuthorities", reflect.TypeOf((*MockInstance)(nil).GrandpaAuthorities)) +} + +// InherentExtrinsics mocks base method. +func (m *MockInstance) InherentExtrinsics(arg0 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InherentExtrinsics", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InherentExtrinsics indicates an expected call of InherentExtrinsics. +func (mr *MockInstanceMockRecorder) InherentExtrinsics(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InherentExtrinsics", reflect.TypeOf((*MockInstance)(nil).InherentExtrinsics), arg0) +} + +// InitializeBlock mocks base method. 
+func (m *MockInstance) InitializeBlock(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InitializeBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// InitializeBlock indicates an expected call of InitializeBlock. +func (mr *MockInstanceMockRecorder) InitializeBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeBlock", reflect.TypeOf((*MockInstance)(nil).InitializeBlock), arg0) +} + +// Keystore mocks base method. +func (m *MockInstance) Keystore() *keystore.GlobalKeystore { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Keystore") + ret0, _ := ret[0].(*keystore.GlobalKeystore) + return ret0 +} + +// Keystore indicates an expected call of Keystore. +func (mr *MockInstanceMockRecorder) Keystore() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Keystore", reflect.TypeOf((*MockInstance)(nil).Keystore)) +} + +// Metadata mocks base method. +func (m *MockInstance) Metadata() ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Metadata") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Metadata indicates an expected call of Metadata. +func (mr *MockInstanceMockRecorder) Metadata() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockInstance)(nil).Metadata)) +} + +// NetworkService mocks base method. +func (m *MockInstance) NetworkService() runtime.BasicNetwork { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetworkService") + ret0, _ := ret[0].(runtime.BasicNetwork) + return ret0 +} + +// NetworkService indicates an expected call of NetworkService. +func (mr *MockInstanceMockRecorder) NetworkService() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkService", reflect.TypeOf((*MockInstance)(nil).NetworkService)) +} + +// NodeStorage mocks base method. +func (m *MockInstance) NodeStorage() runtime.NodeStorage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeStorage") + ret0, _ := ret[0].(runtime.NodeStorage) + return ret0 +} + +// NodeStorage indicates an expected call of NodeStorage. +func (mr *MockInstanceMockRecorder) NodeStorage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStorage", reflect.TypeOf((*MockInstance)(nil).NodeStorage)) +} + +// OffchainWorker mocks base method. +func (m *MockInstance) OffchainWorker() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OffchainWorker") +} + +// OffchainWorker indicates an expected call of OffchainWorker. +func (mr *MockInstanceMockRecorder) OffchainWorker() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OffchainWorker", reflect.TypeOf((*MockInstance)(nil).OffchainWorker)) +} + +// PaymentQueryInfo mocks base method. +func (m *MockInstance) PaymentQueryInfo(arg0 []byte) (*types.TransactionPaymentQueryInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaymentQueryInfo", arg0) + ret0, _ := ret[0].(*types.TransactionPaymentQueryInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaymentQueryInfo indicates an expected call of PaymentQueryInfo. 
+func (mr *MockInstanceMockRecorder) PaymentQueryInfo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaymentQueryInfo", reflect.TypeOf((*MockInstance)(nil).PaymentQueryInfo), arg0) +} + +// RandomSeed mocks base method. +func (m *MockInstance) RandomSeed() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RandomSeed") +} + +// RandomSeed indicates an expected call of RandomSeed. +func (mr *MockInstanceMockRecorder) RandomSeed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RandomSeed", reflect.TypeOf((*MockInstance)(nil).RandomSeed)) +} + +// SetContextStorage mocks base method. +func (m *MockInstance) SetContextStorage(arg0 runtime.Storage) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetContextStorage", arg0) +} + +// SetContextStorage indicates an expected call of SetContextStorage. +func (mr *MockInstanceMockRecorder) SetContextStorage(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContextStorage", reflect.TypeOf((*MockInstance)(nil).SetContextStorage), arg0) +} + +// Stop mocks base method. +func (m *MockInstance) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockInstanceMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockInstance)(nil).Stop)) +} + +// UpdateRuntimeCode mocks base method. +func (m *MockInstance) UpdateRuntimeCode(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateRuntimeCode", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateRuntimeCode indicates an expected call of UpdateRuntimeCode. +func (mr *MockInstanceMockRecorder) UpdateRuntimeCode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRuntimeCode", reflect.TypeOf((*MockInstance)(nil).UpdateRuntimeCode), arg0) +} + +// ValidateTransaction mocks base method. +func (m *MockInstance) ValidateTransaction(arg0 types.Extrinsic) (*transaction.Validity, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateTransaction", arg0) + ret0, _ := ret[0].(*transaction.Validity) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateTransaction indicates an expected call of ValidateTransaction. +func (mr *MockInstanceMockRecorder) ValidateTransaction(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTransaction", reflect.TypeOf((*MockInstance)(nil).ValidateTransaction), arg0) +} + +// Validator mocks base method. +func (m *MockInstance) Validator() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Validator") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Validator indicates an expected call of Validator. +func (mr *MockInstanceMockRecorder) Validator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validator", reflect.TypeOf((*MockInstance)(nil).Validator)) +} + +// Version mocks base method. +func (m *MockInstance) Version() (runtime.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(runtime.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. 
+func (mr *MockInstanceMockRecorder) Version() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockInstance)(nil).Version)) +} diff --git a/dot/sync/mock_interface_test.go b/dot/sync/mock_interface_test.go new file mode 100644 index 0000000000..c34f41db3e --- /dev/null +++ b/dot/sync/mock_interface_test.go @@ -0,0 +1,773 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: BlockState,StorageState,CodeSubstitutedState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network) + +// Package sync is a generated GoMock package. +package sync + +import ( + reflect "reflect" + + network "github.com/ChainSafe/gossamer/dot/network" + peerset "github.com/ChainSafe/gossamer/dot/peerset" + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + storage "github.com/ChainSafe/gossamer/lib/runtime/storage" + gomock "github.com/golang/mock/gomock" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// MockBlockState is a mock of BlockState interface. +type MockBlockState struct { + ctrl *gomock.Controller + recorder *MockBlockStateMockRecorder +} + +// MockBlockStateMockRecorder is the mock recorder for MockBlockState. +type MockBlockStateMockRecorder struct { + mock *MockBlockState +} + +// NewMockBlockState creates a new mock instance. +func NewMockBlockState(ctrl *gomock.Controller) *MockBlockState { + mock := &MockBlockState{ctrl: ctrl} + mock.recorder = &MockBlockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlockState) EXPECT() *MockBlockStateMockRecorder { + return m.recorder +} + +// AddBlock mocks base method. +func (m *MockBlockState) AddBlock(arg0 *types.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddBlock indicates an expected call of AddBlock. +func (mr *MockBlockStateMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockBlockState)(nil).AddBlock), arg0) +} + +// AddBlockToBlockTree mocks base method. +func (m *MockBlockState) AddBlockToBlockTree(arg0 *types.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddBlockToBlockTree", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddBlockToBlockTree indicates an expected call of AddBlockToBlockTree. +func (mr *MockBlockStateMockRecorder) AddBlockToBlockTree(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlockToBlockTree", reflect.TypeOf((*MockBlockState)(nil).AddBlockToBlockTree), arg0) +} + +// BestBlockHash mocks base method. +func (m *MockBlockState) BestBlockHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// BestBlockHash indicates an expected call of BestBlockHash. +func (mr *MockBlockStateMockRecorder) BestBlockHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockHash", reflect.TypeOf((*MockBlockState)(nil).BestBlockHash)) +} + +// BestBlockHeader mocks base method. 
+func (m *MockBlockState) BestBlockHeader() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockHeader") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BestBlockHeader indicates an expected call of BestBlockHeader. +func (mr *MockBlockStateMockRecorder) BestBlockHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockHeader", reflect.TypeOf((*MockBlockState)(nil).BestBlockHeader)) +} + +// BestBlockNumber mocks base method. +func (m *MockBlockState) BestBlockNumber() (uint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockNumber") + ret0, _ := ret[0].(uint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BestBlockNumber indicates an expected call of BestBlockNumber. +func (mr *MockBlockStateMockRecorder) BestBlockNumber() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockNumber", reflect.TypeOf((*MockBlockState)(nil).BestBlockNumber)) +} + +// CompareAndSetBlockData mocks base method. +func (m *MockBlockState) CompareAndSetBlockData(arg0 *types.BlockData) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompareAndSetBlockData", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CompareAndSetBlockData indicates an expected call of CompareAndSetBlockData. +func (mr *MockBlockStateMockRecorder) CompareAndSetBlockData(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompareAndSetBlockData", reflect.TypeOf((*MockBlockState)(nil).CompareAndSetBlockData), arg0) +} + +// GetAllBlocksAtNumber mocks base method. +func (m *MockBlockState) GetAllBlocksAtNumber(arg0 uint) ([]common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllBlocksAtNumber", arg0) + ret0, _ := ret[0].([]common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllBlocksAtNumber indicates an expected call of GetAllBlocksAtNumber. +func (mr *MockBlockStateMockRecorder) GetAllBlocksAtNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllBlocksAtNumber", reflect.TypeOf((*MockBlockState)(nil).GetAllBlocksAtNumber), arg0) +} + +// GetBlockBody mocks base method. +func (m *MockBlockState) GetBlockBody(arg0 common.Hash) (*types.Body, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockBody", arg0) + ret0, _ := ret[0].(*types.Body) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockBody indicates an expected call of GetBlockBody. +func (mr *MockBlockStateMockRecorder) GetBlockBody(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockBody", reflect.TypeOf((*MockBlockState)(nil).GetBlockBody), arg0) +} + +// GetBlockByHash mocks base method. +func (m *MockBlockState) GetBlockByHash(arg0 common.Hash) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByHash", arg0) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByHash indicates an expected call of GetBlockByHash. +func (mr *MockBlockStateMockRecorder) GetBlockByHash(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHash", reflect.TypeOf((*MockBlockState)(nil).GetBlockByHash), arg0) +} + +// GetBlockByNumber mocks base method. 
+func (m *MockBlockState) GetBlockByNumber(arg0 uint) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByNumber", arg0) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByNumber indicates an expected call of GetBlockByNumber. +func (mr *MockBlockStateMockRecorder) GetBlockByNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByNumber", reflect.TypeOf((*MockBlockState)(nil).GetBlockByNumber), arg0) +} + +// GetFinalisedNotifierChannel mocks base method. +func (m *MockBlockState) GetFinalisedNotifierChannel() chan *types.FinalisationInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFinalisedNotifierChannel") + ret0, _ := ret[0].(chan *types.FinalisationInfo) + return ret0 +} + +// GetFinalisedNotifierChannel indicates an expected call of GetFinalisedNotifierChannel. +func (mr *MockBlockStateMockRecorder) GetFinalisedNotifierChannel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalisedNotifierChannel", reflect.TypeOf((*MockBlockState)(nil).GetFinalisedNotifierChannel)) +} + +// GetHashByNumber mocks base method. +func (m *MockBlockState) GetHashByNumber(arg0 uint) (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHashByNumber", arg0) + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHashByNumber indicates an expected call of GetHashByNumber. +func (mr *MockBlockStateMockRecorder) GetHashByNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHashByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHashByNumber), arg0) +} + +// GetHeader mocks base method. +func (m *MockBlockState) GetHeader(arg0 common.Hash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader", arg0) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockBlockStateMockRecorder) GetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockBlockState)(nil).GetHeader), arg0) +} + +// GetHeaderByNumber mocks base method. +func (m *MockBlockState) GetHeaderByNumber(arg0 uint) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeaderByNumber", arg0) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeaderByNumber indicates an expected call of GetHeaderByNumber. +func (mr *MockBlockStateMockRecorder) GetHeaderByNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeaderByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHeaderByNumber), arg0) +} + +// GetHighestFinalisedHeader mocks base method. +func (m *MockBlockState) GetHighestFinalisedHeader() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestFinalisedHeader") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestFinalisedHeader indicates an expected call of GetHighestFinalisedHeader. 
+func (mr *MockBlockStateMockRecorder) GetHighestFinalisedHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestFinalisedHeader", reflect.TypeOf((*MockBlockState)(nil).GetHighestFinalisedHeader)) +} + +// GetJustification mocks base method. +func (m *MockBlockState) GetJustification(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetJustification", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetJustification indicates an expected call of GetJustification. +func (mr *MockBlockStateMockRecorder) GetJustification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetJustification", reflect.TypeOf((*MockBlockState)(nil).GetJustification), arg0) +} + +// GetMessageQueue mocks base method. +func (m *MockBlockState) GetMessageQueue(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMessageQueue", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMessageQueue indicates an expected call of GetMessageQueue. +func (mr *MockBlockStateMockRecorder) GetMessageQueue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessageQueue", reflect.TypeOf((*MockBlockState)(nil).GetMessageQueue), arg0) +} + +// GetReceipt mocks base method. +func (m *MockBlockState) GetReceipt(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetReceipt", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetReceipt indicates an expected call of GetReceipt. +func (mr *MockBlockStateMockRecorder) GetReceipt(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReceipt", reflect.TypeOf((*MockBlockState)(nil).GetReceipt), arg0) +} + +// GetRuntime mocks base method. +func (m *MockBlockState) GetRuntime(arg0 *common.Hash) (runtime.Instance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRuntime", arg0) + ret0, _ := ret[0].(runtime.Instance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRuntime indicates an expected call of GetRuntime. +func (mr *MockBlockStateMockRecorder) GetRuntime(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRuntime", reflect.TypeOf((*MockBlockState)(nil).GetRuntime), arg0) +} + +// HasBlockBody mocks base method. +func (m *MockBlockState) HasBlockBody(arg0 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasBlockBody", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasBlockBody indicates an expected call of HasBlockBody. +func (mr *MockBlockStateMockRecorder) HasBlockBody(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasBlockBody", reflect.TypeOf((*MockBlockState)(nil).HasBlockBody), arg0) +} + +// HasHeader mocks base method. +func (m *MockBlockState) HasHeader(arg0 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasHeader", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasHeader indicates an expected call of HasHeader. 
+func (mr *MockBlockStateMockRecorder) HasHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasHeader", reflect.TypeOf((*MockBlockState)(nil).HasHeader), arg0) +} + +// IsDescendantOf mocks base method. +func (m *MockBlockState) IsDescendantOf(arg0, arg1 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDescendantOf", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsDescendantOf indicates an expected call of IsDescendantOf. +func (mr *MockBlockStateMockRecorder) IsDescendantOf(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDescendantOf", reflect.TypeOf((*MockBlockState)(nil).IsDescendantOf), arg0, arg1) +} + +// SetFinalisedHash mocks base method. +func (m *MockBlockState) SetFinalisedHash(arg0 common.Hash, arg1, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetFinalisedHash", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetFinalisedHash indicates an expected call of SetFinalisedHash. +func (mr *MockBlockStateMockRecorder) SetFinalisedHash(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalisedHash", reflect.TypeOf((*MockBlockState)(nil).SetFinalisedHash), arg0, arg1, arg2) +} + +// SetHeader mocks base method. +func (m *MockBlockState) SetHeader(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockBlockStateMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockBlockState)(nil).SetHeader), arg0) +} + +// SetJustification mocks base method. +func (m *MockBlockState) SetJustification(arg0 common.Hash, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetJustification", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetJustification indicates an expected call of SetJustification. +func (mr *MockBlockStateMockRecorder) SetJustification(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetJustification", reflect.TypeOf((*MockBlockState)(nil).SetJustification), arg0, arg1) +} + +// StoreRuntime mocks base method. +func (m *MockBlockState) StoreRuntime(arg0 common.Hash, arg1 runtime.Instance) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StoreRuntime", arg0, arg1) +} + +// StoreRuntime indicates an expected call of StoreRuntime. +func (mr *MockBlockStateMockRecorder) StoreRuntime(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreRuntime", reflect.TypeOf((*MockBlockState)(nil).StoreRuntime), arg0, arg1) +} + +// SubChain mocks base method. +func (m *MockBlockState) SubChain(arg0, arg1 common.Hash) ([]common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubChain", arg0, arg1) + ret0, _ := ret[0].([]common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubChain indicates an expected call of SubChain. 
+func (mr *MockBlockStateMockRecorder) SubChain(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubChain", reflect.TypeOf((*MockBlockState)(nil).SubChain), arg0, arg1) +} + +// MockStorageState is a mock of StorageState interface. +type MockStorageState struct { + ctrl *gomock.Controller + recorder *MockStorageStateMockRecorder +} + +// MockStorageStateMockRecorder is the mock recorder for MockStorageState. +type MockStorageStateMockRecorder struct { + mock *MockStorageState +} + +// NewMockStorageState creates a new mock instance. +func NewMockStorageState(ctrl *gomock.Controller) *MockStorageState { + mock := &MockStorageState{ctrl: ctrl} + mock.recorder = &MockStorageStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStorageState) EXPECT() *MockStorageStateMockRecorder { + return m.recorder +} + +// LoadCodeHash mocks base method. +func (m *MockStorageState) LoadCodeHash(arg0 *common.Hash) (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadCodeHash", arg0) + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LoadCodeHash indicates an expected call of LoadCodeHash. +func (mr *MockStorageStateMockRecorder) LoadCodeHash(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadCodeHash", reflect.TypeOf((*MockStorageState)(nil).LoadCodeHash), arg0) +} + +// Lock mocks base method. +func (m *MockStorageState) Lock() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Lock") +} + +// Lock indicates an expected call of Lock. +func (mr *MockStorageStateMockRecorder) Lock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockStorageState)(nil).Lock)) +} + +// TrieState mocks base method. +func (m *MockStorageState) TrieState(arg0 *common.Hash) (*storage.TrieState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TrieState", arg0) + ret0, _ := ret[0].(*storage.TrieState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TrieState indicates an expected call of TrieState. +func (mr *MockStorageStateMockRecorder) TrieState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrieState", reflect.TypeOf((*MockStorageState)(nil).TrieState), arg0) +} + +// Unlock mocks base method. +func (m *MockStorageState) Unlock() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Unlock") +} + +// Unlock indicates an expected call of Unlock. +func (mr *MockStorageStateMockRecorder) Unlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockStorageState)(nil).Unlock)) +} + +// MockCodeSubstitutedState is a mock of CodeSubstitutedState interface. +type MockCodeSubstitutedState struct { + ctrl *gomock.Controller + recorder *MockCodeSubstitutedStateMockRecorder +} + +// MockCodeSubstitutedStateMockRecorder is the mock recorder for MockCodeSubstitutedState. +type MockCodeSubstitutedStateMockRecorder struct { + mock *MockCodeSubstitutedState +} + +// NewMockCodeSubstitutedState creates a new mock instance. 
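MockStorageState above is the one mock in this file whose methods are used as a locked sequence in the production code path (Lock, then TrieState, then Unlock). Where a test cares about that ordering, gomock can enforce it with gomock.InOrder. A hedged sketch; the ordering requirement is assumed here for illustration and is not asserted anywhere in this patch:

func TestStorageStateOrdering_sketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	storageState := NewMockStorageState(ctrl)

	// gomock.InOrder only passes when the calls happen in this sequence.
	gomock.InOrder(
		storageState.EXPECT().Lock(),
		storageState.EXPECT().TrieState(gomock.Nil()).Return(nil, nil),
		storageState.EXPECT().Unlock(),
	)

	storageState.Lock()
	_, _ = storageState.TrieState(nil) // typed nil *common.Hash, matched by gomock.Nil()
	storageState.Unlock()
}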
+func NewMockCodeSubstitutedState(ctrl *gomock.Controller) *MockCodeSubstitutedState { + mock := &MockCodeSubstitutedState{ctrl: ctrl} + mock.recorder = &MockCodeSubstitutedStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCodeSubstitutedState) EXPECT() *MockCodeSubstitutedStateMockRecorder { + return m.recorder +} + +// LoadCodeSubstitutedBlockHash mocks base method. +func (m *MockCodeSubstitutedState) LoadCodeSubstitutedBlockHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadCodeSubstitutedBlockHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// LoadCodeSubstitutedBlockHash indicates an expected call of LoadCodeSubstitutedBlockHash. +func (mr *MockCodeSubstitutedStateMockRecorder) LoadCodeSubstitutedBlockHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadCodeSubstitutedBlockHash", reflect.TypeOf((*MockCodeSubstitutedState)(nil).LoadCodeSubstitutedBlockHash)) +} + +// StoreCodeSubstitutedBlockHash mocks base method. +func (m *MockCodeSubstitutedState) StoreCodeSubstitutedBlockHash(arg0 common.Hash) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreCodeSubstitutedBlockHash", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// StoreCodeSubstitutedBlockHash indicates an expected call of StoreCodeSubstitutedBlockHash. +func (mr *MockCodeSubstitutedStateMockRecorder) StoreCodeSubstitutedBlockHash(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreCodeSubstitutedBlockHash", reflect.TypeOf((*MockCodeSubstitutedState)(nil).StoreCodeSubstitutedBlockHash), arg0) +} + +// MockTransactionState is a mock of TransactionState interface. +type MockTransactionState struct { + ctrl *gomock.Controller + recorder *MockTransactionStateMockRecorder +} + +// MockTransactionStateMockRecorder is the mock recorder for MockTransactionState. +type MockTransactionStateMockRecorder struct { + mock *MockTransactionState +} + +// NewMockTransactionState creates a new mock instance. +func NewMockTransactionState(ctrl *gomock.Controller) *MockTransactionState { + mock := &MockTransactionState{ctrl: ctrl} + mock.recorder = &MockTransactionStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTransactionState) EXPECT() *MockTransactionStateMockRecorder { + return m.recorder +} + +// RemoveExtrinsic mocks base method. +func (m *MockTransactionState) RemoveExtrinsic(arg0 types.Extrinsic) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveExtrinsic", arg0) +} + +// RemoveExtrinsic indicates an expected call of RemoveExtrinsic. +func (mr *MockTransactionStateMockRecorder) RemoveExtrinsic(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveExtrinsic", reflect.TypeOf((*MockTransactionState)(nil).RemoveExtrinsic), arg0) +} + +// MockBabeVerifier is a mock of BabeVerifier interface. +type MockBabeVerifier struct { + ctrl *gomock.Controller + recorder *MockBabeVerifierMockRecorder +} + +// MockBabeVerifierMockRecorder is the mock recorder for MockBabeVerifier. +type MockBabeVerifierMockRecorder struct { + mock *MockBabeVerifier +} + +// NewMockBabeVerifier creates a new mock instance. 
+func NewMockBabeVerifier(ctrl *gomock.Controller) *MockBabeVerifier { + mock := &MockBabeVerifier{ctrl: ctrl} + mock.recorder = &MockBabeVerifierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBabeVerifier) EXPECT() *MockBabeVerifierMockRecorder { + return m.recorder +} + +// VerifyBlock mocks base method. +func (m *MockBabeVerifier) VerifyBlock(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyBlock indicates an expected call of VerifyBlock. +func (mr *MockBabeVerifierMockRecorder) VerifyBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyBlock", reflect.TypeOf((*MockBabeVerifier)(nil).VerifyBlock), arg0) +} + +// MockFinalityGadget is a mock of FinalityGadget interface. +type MockFinalityGadget struct { + ctrl *gomock.Controller + recorder *MockFinalityGadgetMockRecorder +} + +// MockFinalityGadgetMockRecorder is the mock recorder for MockFinalityGadget. +type MockFinalityGadgetMockRecorder struct { + mock *MockFinalityGadget +} + +// NewMockFinalityGadget creates a new mock instance. +func NewMockFinalityGadget(ctrl *gomock.Controller) *MockFinalityGadget { + mock := &MockFinalityGadget{ctrl: ctrl} + mock.recorder = &MockFinalityGadgetMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFinalityGadget) EXPECT() *MockFinalityGadgetMockRecorder { + return m.recorder +} + +// VerifyBlockJustification mocks base method. +func (m *MockFinalityGadget) VerifyBlockJustification(arg0 common.Hash, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyBlockJustification", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyBlockJustification indicates an expected call of VerifyBlockJustification. +func (mr *MockFinalityGadgetMockRecorder) VerifyBlockJustification(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyBlockJustification", reflect.TypeOf((*MockFinalityGadget)(nil).VerifyBlockJustification), arg0, arg1) +} + +// MockBlockImportHandler is a mock of BlockImportHandler interface. +type MockBlockImportHandler struct { + ctrl *gomock.Controller + recorder *MockBlockImportHandlerMockRecorder +} + +// MockBlockImportHandlerMockRecorder is the mock recorder for MockBlockImportHandler. +type MockBlockImportHandlerMockRecorder struct { + mock *MockBlockImportHandler +} + +// NewMockBlockImportHandler creates a new mock instance. +func NewMockBlockImportHandler(ctrl *gomock.Controller) *MockBlockImportHandler { + mock := &MockBlockImportHandler{ctrl: ctrl} + mock.recorder = &MockBlockImportHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlockImportHandler) EXPECT() *MockBlockImportHandlerMockRecorder { + return m.recorder +} + +// HandleBlockImport mocks base method. +func (m *MockBlockImportHandler) HandleBlockImport(arg0 *types.Block, arg1 *storage.TrieState) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleBlockImport", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// HandleBlockImport indicates an expected call of HandleBlockImport. 
+func (mr *MockBlockImportHandlerMockRecorder) HandleBlockImport(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleBlockImport", reflect.TypeOf((*MockBlockImportHandler)(nil).HandleBlockImport), arg0, arg1) +} + +// MockNetwork is a mock of Network interface. +type MockNetwork struct { + ctrl *gomock.Controller + recorder *MockNetworkMockRecorder +} + +// MockNetworkMockRecorder is the mock recorder for MockNetwork. +type MockNetworkMockRecorder struct { + mock *MockNetwork +} + +// NewMockNetwork creates a new mock instance. +func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork { + mock := &MockNetwork{ctrl: ctrl} + mock.recorder = &MockNetworkMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { + return m.recorder +} + +// DoBlockRequest mocks base method. +func (m *MockNetwork) DoBlockRequest(arg0 peer.ID, arg1 *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoBlockRequest", arg0, arg1) + ret0, _ := ret[0].(*network.BlockResponseMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DoBlockRequest indicates an expected call of DoBlockRequest. +func (mr *MockNetworkMockRecorder) DoBlockRequest(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoBlockRequest", reflect.TypeOf((*MockNetwork)(nil).DoBlockRequest), arg0, arg1) +} + +// Peers mocks base method. +func (m *MockNetwork) Peers() []common.PeerInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Peers") + ret0, _ := ret[0].([]common.PeerInfo) + return ret0 +} + +// Peers indicates an expected call of Peers. +func (mr *MockNetworkMockRecorder) Peers() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peers", reflect.TypeOf((*MockNetwork)(nil).Peers)) +} + +// ReportPeer mocks base method. +func (m *MockNetwork) ReportPeer(arg0 peerset.ReputationChange, arg1 peer.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ReportPeer", arg0, arg1) +} + +// ReportPeer indicates an expected call of ReportPeer. 
+func (mr *MockNetworkMockRecorder) ReportPeer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeer", reflect.TypeOf((*MockNetwork)(nil).ReportPeer), arg0, arg1) +} diff --git a/dot/sync/outliers.go b/dot/sync/outliers.go index 3e1c29424f..be33e69a1a 100644 --- a/dot/sync/outliers.go +++ b/dot/sync/outliers.go @@ -8,7 +8,7 @@ import ( "sort" ) -// removeOutliers removes the outlier from the slice +// nonOutliersSumCount calculates the sum and count of non-outlier elements // Explanation: // IQR outlier detection // Q25 = 25th_percentile @@ -18,8 +18,8 @@ import ( // If x > Q75 + 3.0 * IQR or x < Q25 – 3.0 * IQR THEN x is a extreme outlier // Ref: http://www.mathwords.com/o/outlier.htm // -// returns: sum of all the non-outliers elements -func removeOutliers(dataArrUint []uint) (sum *big.Int, count uint) { +// returns: sum and count of all the non-outliers elements +func nonOutliersSumCount(dataArrUint []uint) (sum *big.Int, count uint) { dataArr := make([]*big.Int, len(dataArrUint)) for i, v := range dataArrUint { dataArr[i] = big.NewInt(int64(v)) diff --git a/dot/sync/outliers_integeration_test.go b/dot/sync/outliers_integeration_test.go deleted file mode 100644 index 86d5313094..0000000000 --- a/dot/sync/outliers_integeration_test.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build integration -// +build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestRemoveOutlier(t *testing.T) { - t.Parallel() - - arr := []uint{100, 0, 260, 280, 220, 240, 250, 1000} - - expectedSum := big.NewInt(1350) // excluding the outlier -100 and 1000 - expectedCount := uint(7) - - sum, count := removeOutliers(arr) - assert.Equal(t, expectedSum, sum) - assert.Equal(t, expectedCount, count) -} diff --git a/dot/sync/outliers_test.go b/dot/sync/outliers_test.go new file mode 100644 index 0000000000..a407d654d9 --- /dev/null +++ b/dot/sync/outliers_test.go @@ -0,0 +1,46 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_nonOutliersSumCount(t *testing.T) { + tests := []struct { + name string + dataArr []uint + wantSum *big.Int + wantCount uint + }{ + { + name: "case 0 outliers", + dataArr: []uint{2, 5, 6, 9, 12}, + wantSum: big.NewInt(34), + wantCount: uint(5), + }, + { + name: "case 1 outliers", + dataArr: []uint{100, 2, 260, 280, 220, 240, 250, 1000}, + wantSum: big.NewInt(1352), + wantCount: uint(7), + }, + { + name: "case 2 outliers", + dataArr: []uint{5000, 500, 5560, 5580, 5520, 5540, 5550, 100000}, + wantSum: big.NewInt(32750), + wantCount: uint(6), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotSum, gotCount := nonOutliersSumCount(tt.dataArr) + assert.Equal(t, tt.wantSum, gotSum) + assert.Equal(t, tt.wantCount, gotCount) + }) + } +} diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index a9c5921a7a..6cf7965ada 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -100,7 +100,7 @@ func NewService(cfg *Config) (*Service, error) { // Start begins the chainSync and chainProcessor modules. 
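Before moving on: the outliers.go rename above (removeOutliers to nonOutliersSumCount) makes the name match what the function returns, and the IQR rule in its comment is easiest to see concretely. A simplified sketch of the mild-outlier (1.5 * IQR) filter follows, assuming the standard sort import, using float64 in place of the *big.Int arithmetic the real function needs, and taking the quartiles as medians of the lower and upper halves. That quartile choice reproduces all three rows of the new outliers_test.go table, but the real implementation is not shown in this patch, so treat this as an assumption-laden sketch:

// nonOutlierSketch keeps x when Q25 - 1.5*IQR <= x <= Q75 + 1.5*IQR,
// mirroring the mild-outlier rule documented in outliers.go.
func nonOutlierSketch(data []float64) (sum float64, count uint) {
	if len(data) < 2 { // too few points for quartiles: keep everything
		for _, x := range data {
			sum += x
			count++
		}
		return sum, count
	}

	sorted := make([]float64, len(data))
	copy(sorted, data)
	sort.Float64s(sorted)

	median := func(s []float64) float64 {
		if len(s)%2 == 0 {
			return (s[len(s)/2-1] + s[len(s)/2]) / 2
		}
		return s[len(s)/2]
	}

	half := len(sorted) / 2
	q25 := median(sorted[:half])             // median of the lower half
	q75 := median(sorted[len(sorted)-half:]) // median of the upper half
	iqr := q75 - q25
	lower, upper := q25-1.5*iqr, q75+1.5*iqr

	for _, x := range data {
		if x >= lower && x <= upper {
			sum += x
			count++
		}
	}
	return sum, count
}

Worked against the "case 1 outliers" row (100, 2, 260, 280, 220, 240, 250, 1000): the lower-half median is 160 and the upper-half median is 270, so IQR = 110 and the keep-range is [-5, 435]; only 1000 falls outside, giving the table's expected sum of 1352 and count of 7.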
It begins syncing in bootstrap mode func (s *Service) Start() error { go s.chainSync.start() - go s.chainProcessor.start() + s.chainProcessor.start() return nil } diff --git a/dot/sync/syncer_integeration_test.go b/dot/sync/syncer_integeration_test.go index e495534d11..fd1f21a012 100644 --- a/dot/sync/syncer_integeration_test.go +++ b/dot/sync/syncer_integeration_test.go @@ -27,19 +27,6 @@ import ( "github.com/stretchr/testify/require" ) -func newMockFinalityGadget() *mocks.FinalityGadget { - m := new(mocks.FinalityGadget) - // using []uint8 instead of []byte: https://github.com/stretchr/testify/pull/969 - m.On("VerifyBlockJustification", mock.AnythingOfType("common.Hash"), mock.AnythingOfType("[]uint8")).Return(nil) - return m -} - -func newMockBabeVerifier() *mocks.BabeVerifier { - m := new(mocks.BabeVerifier) - m.On("VerifyBlock", mock.AnythingOfType("*types.Header")).Return(nil) - return m -} - func newMockNetwork() *mocks.Network { m := new(mocks.Network) m.On("DoBlockRequest", mock.AnythingOfType("peer.ID"), @@ -127,12 +114,16 @@ func newTestSyncer(t *testing.T) *Service { }) cfg.TransactionState = stateSrvc.Transaction - cfg.BabeVerifier = newMockBabeVerifier() + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockBabeVerifier.EXPECT().VerifyBlock(gomock.AssignableToTypeOf(&types.Header{})).AnyTimes() + cfg.BabeVerifier = mockBabeVerifier cfg.LogLvl = log.Trace - cfg.FinalityGadget = newMockFinalityGadget() + mockFinalityGadget := NewMockFinalityGadget(ctrl) + mockFinalityGadget.EXPECT().VerifyBlockJustification(gomock.AssignableToTypeOf(common.Hash{}), + gomock.AssignableToTypeOf([]byte{})).AnyTimes() + cfg.FinalityGadget = mockFinalityGadget cfg.Network = newMockNetwork() cfg.Telemetry = mockTelemetryClient - syncer, err := NewService(cfg) require.NoError(t, err) return syncer diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go new file mode 100644 index 0000000000..fb45ee57df --- /dev/null +++ b/dot/sync/syncer_test.go @@ -0,0 +1,409 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "errors" + "testing" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" +) + +func TestNewService(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + cfgBuilder func(ctrl *gomock.Controller) *Config + want *Service + err error + }{ + { + name: "nil Network", + cfgBuilder: func(_ *gomock.Controller) *Config { + return &Config{} + }, + err: errNilNetwork, + }, + { + name: "nil BlockState", + cfgBuilder: func(_ *gomock.Controller) *Config { + return &Config{ + Network: NewMockNetwork(nil), + } + }, + err: errNilBlockState, + }, + { + name: "nil StorageState", + cfgBuilder: func(_ *gomock.Controller) *Config { + return &Config{ + Network: NewMockNetwork(nil), + BlockState: NewMockBlockState(nil), + } + }, + err: errNilStorageState, + }, + { + name: "nil FinalityGadget", + cfgBuilder: func(_ *gomock.Controller) *Config { + return &Config{ + Network: NewMockNetwork(nil), + BlockState: NewMockBlockState(nil), + StorageState: NewMockStorageState(nil), + } + }, + err: errNilFinalityGadget, + }, + { + name: "nil TransactionState", + cfgBuilder: func(_ *gomock.Controller) *Config { + return &Config{ + Network: NewMockNetwork(nil), + BlockState: NewMockBlockState(nil), + 
StorageState: NewMockStorageState(nil), + FinalityGadget: NewMockFinalityGadget(nil), + } + }, + err: errNilTransactionState, + }, + { + name: "nil Verifier", + cfgBuilder: func(_ *gomock.Controller) *Config { + return &Config{ + Network: NewMockNetwork(nil), + BlockState: NewMockBlockState(nil), + StorageState: NewMockStorageState(nil), + FinalityGadget: NewMockFinalityGadget(nil), + TransactionState: NewMockTransactionState(nil), + } + }, + err: errNilVerifier, + }, + { + name: "nil BlockImportHandler", + cfgBuilder: func(_ *gomock.Controller) *Config { + return &Config{ + Network: NewMockNetwork(nil), + BlockState: NewMockBlockState(nil), + StorageState: NewMockStorageState(nil), + FinalityGadget: NewMockFinalityGadget(nil), + TransactionState: NewMockTransactionState(nil), + BabeVerifier: NewMockBabeVerifier(nil), + } + }, + err: errNilBlockImportHandler, + }, + { + name: "working example", + cfgBuilder: func(ctrl *gomock.Controller) *Config { + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().GetFinalisedNotifierChannel(). + Return(make(chan *types.FinalisationInfo)) + return &Config{ + Network: NewMockNetwork(nil), + BlockState: blockState, + StorageState: NewMockStorageState(nil), + FinalityGadget: NewMockFinalityGadget(nil), + TransactionState: NewMockTransactionState(nil), + BabeVerifier: NewMockBabeVerifier(nil), + BlockImportHandler: NewMockBlockImportHandler(nil), + } + }, + want: &Service{}, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + config := tt.cfgBuilder(ctrl) + + got, err := NewService(config) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + if tt.want != nil { + assert.NotNil(t, got) + } + }) + } +} + +func TestService_HandleBlockAnnounce(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + + type fields struct { + chainSync ChainSync + } + type args struct { + from peer.ID + msg *network.BlockAnnounceMessage + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "working example", + fields: fields{ + chainSync: newMockChainSync(ctrl), + }, + args: args{ + from: peer.ID("1"), + msg: &network.BlockAnnounceMessage{ + ParentHash: common.Hash{}, + Number: 1, + StateRoot: common.Hash{}, + ExtrinsicsRoot: common.Hash{}, + Digest: scale.VaryingDataTypeSlice{}, + BestBlock: false, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + s := &Service{ + chainSync: tt.fields.chainSync, + } + if err := s.HandleBlockAnnounce(tt.args.from, tt.args.msg); (err != nil) != tt.wantErr { + t.Errorf("HandleBlockAnnounce() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func newMockChainSync(ctrl *gomock.Controller) ChainSync { + mock := NewMockChainSync(ctrl) + header, _ := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, + scale.VaryingDataTypeSlice{}) + + mock.EXPECT().setBlockAnnounce(peer.ID("1"), header).Return(nil).AnyTimes() + mock.EXPECT().setPeerHead(peer.ID("1"), common.Hash{}, uint(0)).Return(nil).AnyTimes() + mock.EXPECT().syncState().Return(bootstrap).AnyTimes() + mock.EXPECT().start().AnyTimes() + mock.EXPECT().stop().AnyTimes() + mock.EXPECT().getHighestBlock().Return(uint(2), nil).AnyTimes() + + return mock +} + +func Test_Service_HandleBlockAnnounceHandshake(t *testing.T) { + t.Parallel() + + errTest := errors.New("test error") + + testCases := 
map[string]struct { + serviceBuilder func(ctrl *gomock.Controller) Service + from peer.ID + message *network.BlockAnnounceHandshake + errWrapped error + errMessage string + }{ + "success": { + serviceBuilder: func(ctrl *gomock.Controller) Service { + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().setPeerHead(peer.ID("abc"), common.Hash{1}, uint(2)). + Return(nil) + return Service{ + chainSync: chainSync, + } + }, + from: peer.ID("abc"), + message: &network.BlockAnnounceHandshake{ + BestBlockHash: common.Hash{1}, + BestBlockNumber: 2, + }, + }, + "failure": { + serviceBuilder: func(ctrl *gomock.Controller) Service { + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().setPeerHead(peer.ID("abc"), common.Hash{1}, uint(2)). + Return(errTest) + return Service{ + chainSync: chainSync, + } + }, + from: peer.ID("abc"), + message: &network.BlockAnnounceHandshake{ + BestBlockHash: common.Hash{1}, + BestBlockNumber: 2, + }, + errWrapped: errTest, + errMessage: "test error", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + service := testCase.serviceBuilder(ctrl) + + err := service.HandleBlockAnnounceHandshake(testCase.from, testCase.message) + + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + }) + } +} + +func TestService_IsSynced(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + serviceBuilder func(ctrl *gomock.Controller) Service + synced bool + }{ + "tip": { + serviceBuilder: func(ctrl *gomock.Controller) Service { + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().syncState().Return(tip) + return Service{ + chainSync: chainSync, + } + }, + synced: true, + }, + "not tip": { + serviceBuilder: func(ctrl *gomock.Controller) Service { + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().syncState().Return(bootstrap) + return Service{ + chainSync: chainSync, + } + }, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + service := testCase.serviceBuilder(ctrl) + + synced := service.IsSynced() + + assert.Equal(t, testCase.synced, synced) + }) + } +} + +func TestService_Start(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + done := make(chan struct{}) + + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().start().DoAndReturn(func() { + close(done) + }) + + chainProcessor := NewMockChainProcessor(ctrl) + chainProcessor.EXPECT().start() + + service := Service{ + chainSync: chainSync, + chainProcessor: chainProcessor, + } + + err := service.Start() + <-done + assert.NoError(t, err) +} + +func TestService_Stop(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().stop() + chainProcessor := NewMockChainProcessor(ctrl) + chainProcessor.EXPECT().stop() + + service := &Service{ + chainSync: chainSync, + chainProcessor: chainProcessor, + } + + err := service.Stop() + assert.NoError(t, err) +} + +func Test_reverseBlockData(t *testing.T) { + t.Parallel() + + type args struct { + data []*types.BlockData + } + tests := []struct { + name string + args args + expected args + }{ + { + name: "working example", + args: args{data: []*types.BlockData{ + { + Hash: common.MustHexToHash("0x01"), + }, + { + Hash: common.MustHexToHash("0x02"), + }}}, + expected: args{data: 
[]*types.BlockData{{ + Hash: common.MustHexToHash("0x02"), + }, { + Hash: common.MustHexToHash("0x01"), + }}, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + reverseBlockData(tt.args.data) + assert.Equal(t, tt.expected.data, tt.args.data) + }) + } +} + +func TestService_HighestBlock(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().getHighestBlock().Return(uint(2), nil) + + service := &Service{ + chainSync: chainSync, + } + highestBlock := service.HighestBlock() + const expected = uint(2) + assert.Equal(t, expected, highestBlock) +} diff --git a/dot/sync/test_helpers.go b/dot/sync/test_helpers.go index f94beb2c22..5766bd5dec 100644 --- a/dot/sync/test_helpers.go +++ b/dot/sync/test_helpers.go @@ -10,7 +10,6 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/runtime" "github.com/ChainSafe/gossamer/pkg/scale" - "github.com/stretchr/testify/require" ) diff --git a/dot/sync/tip_syncer_integeration_test.go b/dot/sync/tip_syncer_integeration_test.go index 4014e4c79f..ca32eac06b 100644 --- a/dot/sync/tip_syncer_integeration_test.go +++ b/dot/sync/tip_syncer_integeration_test.go @@ -9,13 +9,14 @@ package sync import ( "context" "testing" + "time" "github.com/ChainSafe/gossamer/dot/network" syncmocks "github.com/ChainSafe/gossamer/dot/sync/mocks" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/trie" - + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -31,12 +32,7 @@ func newTestTipSyncer(t *testing.T) *tipSyncer { readyBlocks := newBlockQueue(maxResponseSize) pendingBlocks := newDisjointBlockSet(pendingBlocksLimit) - cs := &chainSync{ - blockState: bs, - readyBlocks: readyBlocks, - pendingBlocks: pendingBlocks, - } - return newTipSyncer(bs, pendingBlocks, readyBlocks, cs.handleReadyBlock) + return newTipSyncer(bs, pendingBlocks, readyBlocks, nil) } func TestTipSyncer_handleNewPeerState(t *testing.T) { @@ -178,12 +174,18 @@ func TestTipSyncer_handleTick_case1(t *testing.T) { targetNumber: uintPtr(fin.Number), direction: network.Descending, requestData: bootstrapRequestData, - pendingBlock: s.pendingBlocks.getBlock(common.Hash{0xb}), + pendingBlock: &pendingBlock{ + hash: common.Hash{0xb}, + number: 201, + clearAt: time.Unix(0, 0), + }, }, } - w, err = s.handleTick() require.NoError(t, err) + require.NotEmpty(t, w) + assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) + w[0].pendingBlock.clearAt = time.Unix(0, 0) require.Equal(t, expected, w) require.False(t, s.pendingBlocks.hasBlock(common.Hash{0xa})) require.True(t, s.pendingBlocks.hasBlock(common.Hash{0xb})) @@ -208,18 +210,28 @@ func TestTipSyncer_handleTick_case2(t *testing.T) { targetNumber: uintPtr(header.Number), direction: network.Ascending, requestData: network.RequestedDataBody + network.RequestedDataJustification, - pendingBlock: s.pendingBlocks.getBlock(header.Hash()), + pendingBlock: &pendingBlock{ + hash: header.Hash(), + number: 201, + header: header, + clearAt: time.Time{}, + }, }, } w, err := s.handleTick() require.NoError(t, err) + require.NotEmpty(t, w) + assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) + w[0].pendingBlock.clearAt = time.Time{} require.Equal(t, expected, w) require.True(t, s.pendingBlocks.hasBlock(header.Hash())) } - func TestTipSyncer_handleTick_case3(t *testing.T) { s := newTestTipSyncer(t) 
- + s.handleReadyBlock = func(data *types.BlockData) { + s.pendingBlocks.removeBlock(data.Hash) + s.readyBlocks.push(data) + } fin, _ := s.blockState.GetHighestFinalisedHeader() // add pending block w/ full block, HasHeader will return true, so the block will be processed @@ -261,12 +273,21 @@ func TestTipSyncer_handleTick_case3(t *testing.T) { targetNumber: uintPtr(fin.Number), direction: network.Descending, requestData: bootstrapRequestData, - pendingBlock: s.pendingBlocks.getBlock(header.Hash()), + pendingBlock: &pendingBlock{ + hash: header.Hash(), + number: 300, + header: header, + body: &types.Body{}, + clearAt: time.Time{}, + }, }, } w, err = s.handleTick() require.NoError(t, err) + require.NotEmpty(t, w) + assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) + w[0].pendingBlock.clearAt = time.Time{} require.Equal(t, expected, w) require.True(t, s.pendingBlocks.hasBlock(header.Hash())) @@ -278,7 +299,7 @@ func TestTipSyncer_handleTick_case3(t *testing.T) { require.NoError(t, err) require.Equal(t, []*worker(nil), w) require.False(t, s.pendingBlocks.hasBlock(header.Hash())) - _ = s.readyBlocks.pop(context.Background()) // first pop removes the parent + _ = s.readyBlocks.pop(context.Background()) // first pop will remove parent readyBlockData = s.readyBlocks.pop(context.Background()) require.Equal(t, block.ToBlockData(), readyBlockData) } diff --git a/dot/sync/tip_syncer_test.go b/dot/sync/tip_syncer_test.go new file mode 100644 index 0000000000..097f0dac75 --- /dev/null +++ b/dot/sync/tip_syncer_test.go @@ -0,0 +1,401 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "testing" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func Test_tipSyncer_handleNewPeerState(t *testing.T) { + t.Parallel() + + type fields struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + pendingBlocks DisjointBlockSet + readyBlocks *blockQueue + } + tests := map[string]struct { + fields fields + peerState *peerState + want *worker + err error + }{ + "peer state number < final block number": { + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + }, + peerState: &peerState{number: 1}, + want: nil, + }, + "base state": { + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + }, + peerState: &peerState{number: 3}, + want: &worker{ + startNumber: uintPtr(3), + targetNumber: uintPtr(3), + requestData: bootstrapRequestData, + }, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &tipSyncer{ + blockState: tt.fields.blockStateBuilder(ctrl), + pendingBlocks: tt.fields.pendingBlocks, + readyBlocks: tt.fields.readyBlocks, + } + got, err := s.handleNewPeerState(tt.peerState) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_tipSyncer_handleTick(t *testing.T) { + t.Parallel() 
+ + type fields struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + pendingBlocksBuilder func(ctrl *gomock.Controller) DisjointBlockSet + readyBlocks *blockQueue + } + tests := map[string]struct { + fields fields + want []*worker + err error + }{ + "base case": { + fields: fields{ + pendingBlocksBuilder: func(ctrl *gomock.Controller) DisjointBlockSet { + mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) + mockDisjointBlockSet.EXPECT().size().Return(1).Times(2) + mockDisjointBlockSet.EXPECT().getBlocks().Return([]*pendingBlock{ + {number: 2}, + {number: 3}, + {number: 4, + header: &types.Header{ + Number: 4, + }, + }, + {number: 5, + header: &types.Header{ + Number: 5, + }, + body: &types.Body{}, + }, + }) + mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) + return mockDisjointBlockSet + }, + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ + Number: 2, + }, nil) + mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) + return mockBlockState + }, + readyBlocks: newBlockQueue(3), + }, + want: []*worker{ + { + startNumber: uintPtr(3), + targetNumber: uintPtr(2), + targetHash: common.Hash{5, 189, 204, 69, 79, 96, 160, 141, 66, 125, 5, 231, 241, + 159, 36, 15, 220, 57, 31, 87, 10, 183, 111, 203, 150, 236, 202, 11, 88, 35, 211, 191}, + pendingBlock: &pendingBlock{number: 3}, + requestData: bootstrapRequestData, + direction: network.Descending, + }, + { + startNumber: uintPtr(4), + targetNumber: uintPtr(4), + pendingBlock: &pendingBlock{ + number: 4, + header: &types.Header{ + Number: 4, + }, + }, + requestData: network.RequestedDataBody + network.RequestedDataJustification, + }, + { + startNumber: uintPtr(4), + targetNumber: uintPtr(2), + direction: network.Descending, + pendingBlock: &pendingBlock{ + number: 5, + header: &types.Header{ + Number: 5, + }, + body: &types.Body{}, + }, + requestData: bootstrapRequestData, + }, + }, + err: nil, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &tipSyncer{ + blockState: tt.fields.blockStateBuilder(ctrl), + pendingBlocks: tt.fields.pendingBlocksBuilder(ctrl), + readyBlocks: tt.fields.readyBlocks, + } + got, err := s.handleTick() + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_tipSyncer_handleWorkerResult(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + res *worker + want *worker + err error + }{ + "worker error is nil": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return NewMockBlockState(ctrl) + }, + res: &worker{}, + want: nil, + err: nil, + }, + "worker error is error unknown parent": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return NewMockBlockState(ctrl) + }, + res: &worker{ + err: &workerError{ + err: errUnknownParent, + }, + }, + want: nil, + err: nil, + }, + "ascending, target number < finalised number": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + res: &worker{ + targetNumber: uintPtr(1), + direction: network.Ascending, + err: &workerError{}, + }, + 
want: nil, + err: nil, + }, + "ascending, start number < finalised number": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + res: &worker{ + startNumber: uintPtr(1), + targetNumber: uintPtr(3), + direction: network.Ascending, + err: &workerError{}, + }, + want: &worker{ + startNumber: uintPtr(3), + targetNumber: uintPtr(3), + }, + err: nil, + }, + "descending, start number < finalised number": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + res: &worker{ + startNumber: uintPtr(1), + direction: network.Descending, + err: &workerError{}, + }, + want: nil, + err: nil, + }, + "descending, target number < finalised number": { + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ + Number: 2, + }, nil) + return mockBlockState + }, + res: &worker{ + startNumber: uintPtr(3), + targetNumber: uintPtr(1), + direction: network.Descending, + err: &workerError{}, + }, + want: &worker{ + startNumber: uintPtr(3), + targetNumber: uintPtr(3), + direction: network.Descending, + }, + err: nil, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &tipSyncer{ + blockState: tt.blockStateBuilder(ctrl), + } + got, err := s.handleWorkerResult(tt.res) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_tipSyncer_hasCurrentWorker(t *testing.T) { + t.Parallel() + + type args struct { + w *worker + workers map[uint64]*worker + } + tests := map[string]struct { + args args + want bool + }{ + "worker nil": { + want: true, + }, + "ascending, false": { + args: args{ + w: &worker{ + direction: network.Ascending, + startNumber: uintPtr(2), + targetNumber: uintPtr(2), + }, + workers: map[uint64]*worker{ + 1: { + direction: network.Ascending, + targetNumber: uintPtr(3), + startNumber: uintPtr(3), + }, + }, + }, + want: false, + }, + "ascending, true": { + args: args{ + w: &worker{ + direction: network.Ascending, + startNumber: uintPtr(2), + targetNumber: uintPtr(2), + }, + workers: map[uint64]*worker{ + 1: { + direction: network.Ascending, + targetNumber: uintPtr(3), + startNumber: uintPtr(1), + }, + }, + }, + want: true, + }, + "descending, false": { + args: args{ + w: &worker{ + direction: network.Descending, + startNumber: uintPtr(2), + targetNumber: uintPtr(2), + }, + workers: map[uint64]*worker{ + 1: { + startNumber: uintPtr(3), + targetNumber: uintPtr(3), + direction: network.Descending, + }, + }, + }, + want: false, + }, + "descending, true": { + args: args{ + w: &worker{ + direction: network.Descending, + startNumber: uintPtr(2), + targetNumber: uintPtr(2), + }, + workers: map[uint64]*worker{ + 1: { + startNumber: uintPtr(3), + targetNumber: uintPtr(1), + direction: network.Descending, + }, + }, + }, + want: true, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + s := &tipSyncer{} + got := s.hasCurrentWorker(tt.args.w, tt.args.workers) + assert.Equal(t, tt.want, got) 
+ }) + } +} diff --git a/dot/telemetry/mailer_test.go b/dot/telemetry/mailer_test.go index f76b5e020f..8b4b414dfd 100644 --- a/dot/telemetry/mailer_test.go +++ b/dot/telemetry/mailer_test.go @@ -221,6 +221,7 @@ func TestListenerConcurrency(t *testing.T) { } func TestTelemetryMarshalMessage(t *testing.T) { + t.Parallel() tests := map[string]struct { message Message expected string From 7f55bec0c8de537442b757b42a1e32da79bf22f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jun 2022 12:11:22 -0400 Subject: [PATCH 07/48] chore(deps): bump github.com/docker/docker (#2599) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dcb3274a85..50a84cbbc5 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/dgraph-io/badger/v2 v2.2007.4 github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de github.com/disiqueira/gotree v1.0.0 - github.com/docker/docker v20.10.15+incompatible + github.com/docker/docker v20.10.17+incompatible github.com/ethereum/go-ethereum v1.10.18 github.com/fatih/color v1.13.0 github.com/go-playground/validator/v10 v10.11.0 diff --git a/go.sum b/go.sum index a273fc554e..b8036fd6e7 100644 --- a/go.sum +++ b/go.sum @@ -250,8 +250,8 @@ github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/ github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.15+incompatible h1:dk9FewY/9Xwm4ay/HViEEHSQuM/kL4F+JaG6GQdgmGo= -github.com/docker/docker v20.10.15+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= From 6a9d85b059d1ea733e148f7e678a9828a13800de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jun 2022 09:05:04 -0400 Subject: [PATCH 08/48] chore(deps): bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#2600) --- go.mod | 4 ++-- go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 50a84cbbc5..887976ac2f 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 github.com/qdm12/gotree v0.2.0 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.7.2 github.com/urfave/cli v1.22.9 github.com/wasmerio/go-ext-wasm v0.3.2-0.20200326095750-0a32be6068ec golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 @@ -181,7 +181,7 @@ require ( golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect google.golang.org/appengine v1.6.6 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect 
+ gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.0.3 // indirect ) diff --git a/go.sum b/go.sum index b8036fd6e7..7cf1ca16d8 100644 --- a/go.sum +++ b/go.sum @@ -1274,8 +1274,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= @@ -1857,8 +1858,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= From 223cfbb800181a66fbe5b19b32a0c7c9b7aa0c90 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Thu, 16 Jun 2022 11:05:15 -0400 Subject: [PATCH 09/48] fix(dot/sync): fix `Test_lockQueue_threadSafety` (#2605) --- dot/sync/block_queue_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/dot/sync/block_queue_test.go b/dot/sync/block_queue_test.go index f6796083bb..1691cf46e9 100644 --- a/dot/sync/block_queue_test.go +++ b/dot/sync/block_queue_test.go @@ -219,6 +219,17 @@ func Test_lockQueue_threadSafety(t *testing.T) { } blockHash := common.Hash{1} + endWg.Add(1) + go func() { + defer endWg.Done() + <-ctx.Done() + // Empty queue channel to make sure `push` does not block + // when the context is cancelled. 
+ for len(blockQueue.queue) > 0 { + <-blockQueue.queue + } + }() + for i := 0; i < parallelism; i++ { go runInLoop(func() { blockQueue.push(blockData) From cdc6160d2c56932a4c91205977be9c69c3811dfa Mon Sep 17 00:00:00 2001 From: Zach Date: Thu, 16 Jun 2022 14:10:08 -0400 Subject: [PATCH 10/48] docs: fix video link to point to youtube instead of medium (#2603) --- docs/docs/getting-started/resources/general-resources.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/getting-started/resources/general-resources.md b/docs/docs/getting-started/resources/general-resources.md index f3157b8264..4c459238ac 100644 --- a/docs/docs/getting-started/resources/general-resources.md +++ b/docs/docs/getting-started/resources/general-resources.md @@ -50,7 +50,7 @@ Gossamer is an implementation of the Polkadot Host in Go. To learn more about Go - To understand the vision behind Gossamer and how it fits into the Polkadot ecosystem, please read this blog post: "The Future of Polkadot in Golang: Gossamer". -- For a more technical explanation, we recommend watching this video. +- For a more technical explanation, we recommend watching this video. ### Additional Resources From c061b3582eb2fad4b5f077c25925b5043b52290a Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Fri, 17 Jun 2022 11:40:56 -0400 Subject: [PATCH 11/48] fix(wasmer): fix flaky sort in `Test_ext_crypto_sr25519_public_keys_version_1` (#2607) --- lib/runtime/wasmer/imports_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/runtime/wasmer/imports_test.go b/lib/runtime/wasmer/imports_test.go index 12b5ec5397..3e28ccd062 100644 --- a/lib/runtime/wasmer/imports_test.go +++ b/lib/runtime/wasmer/imports_test.go @@ -1073,7 +1073,7 @@ func Test_ext_crypto_sr25519_public_keys_version_1(t *testing.T) { ks, _ := inst.ctx.Keystore.GetKeystore(idData) require.Equal(t, 0, ks.Size()) - size := 5 + const size = 5 pubKeys := make([][32]byte, size) for i := range pubKeys { kp, err := sr25519.GenerateKeypair() @@ -1083,7 +1083,9 @@ func Test_ext_crypto_sr25519_public_keys_version_1(t *testing.T) { copy(pubKeys[i][:], kp.Public().Encode()) } - sort.Slice(pubKeys, func(i int, j int) bool { return pubKeys[i][0] < pubKeys[j][0] }) + sort.Slice(pubKeys, func(i int, j int) bool { + return bytes.Compare(pubKeys[i][:], pubKeys[j][:]) < 0 + }) res, err := inst.Exec("rtm_ext_crypto_sr25519_public_keys_version_1", idData) require.NoError(t, err) @@ -1096,7 +1098,10 @@ func Test_ext_crypto_sr25519_public_keys_version_1(t *testing.T) { err = scale.Unmarshal(out, &ret) require.NoError(t, err) - sort.Slice(ret, func(i int, j int) bool { return ret[i][0] < ret[j][0] }) + sort.Slice(ret, func(i int, j int) bool { + return bytes.Compare(ret[i][:], ret[j][:]) < 0 + }) + require.Equal(t, pubKeys, ret) } From 7e1014b3ffbe53d8276e269334d3c01447877087 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Fri, 17 Jun 2022 12:26:41 -0400 Subject: [PATCH 12/48] fix(dot/sync): Fix flaky tests `Test_chainSync_logSyncSpeed` and `Test_chainSync_start` (#2610) --- dot/sync/chain_sync.go | 24 +++++++---- dot/sync/chain_sync_test.go | 83 ++++++++++++++++--------------------- 2 files changed, 52 insertions(+), 55 deletions(-) diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index a31b1f376f..a7cee0af3d 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -162,7 +162,10 @@ type chainSync struct { maxWorkerRetries uint16 slotDuration time.Duration - logSyncPeriod time.Duration + logSyncTicker *time.Ticker + 
logSyncTickerC <-chan time.Time // channel as field for unit testing + logSyncStarted bool + logSyncDone chan struct{} } type chainSyncConfig struct { @@ -178,6 +181,8 @@ func newChainSync(cfg *chainSyncConfig) *chainSync { ctx, cancel := context.WithCancel(context.Background()) const syncSamplesToKeep = 30 const logSyncPeriod = 5 * time.Second + logSyncTicker := time.NewTicker(logSyncPeriod) + return &chainSync{ ctx: ctx, cancel: cancel, @@ -197,7 +202,9 @@ func newChainSync(cfg *chainSyncConfig) *chainSync { minPeers: cfg.minPeers, maxWorkerRetries: uint16(cfg.maxPeers), slotDuration: cfg.slotDuration, - logSyncPeriod: logSyncPeriod, + logSyncTicker: logSyncTicker, + logSyncTickerC: logSyncTicker.C, + logSyncDone: make(chan struct{}), } } @@ -219,6 +226,7 @@ func (cs *chainSync) start() { cs.pendingBlockDoneCh = pendingBlockDoneCh go cs.pendingBlocks.run(pendingBlockDoneCh) go cs.sync() + cs.logSyncStarted = true go cs.logSyncSpeed() } @@ -227,6 +235,9 @@ func (cs *chainSync) stop() { close(cs.pendingBlockDoneCh) } cs.cancel() + if cs.logSyncStarted { + <-cs.logSyncDone + } } func (cs *chainSync) syncState() chainSyncState { @@ -333,8 +344,8 @@ func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error } func (cs *chainSync) logSyncSpeed() { - t := time.NewTicker(cs.logSyncPeriod) - defer t.Stop() + defer close(cs.logSyncDone) + defer cs.logSyncTicker.Stop() for { before, err := cs.blockState.BestBlockHeader() @@ -347,10 +358,7 @@ func (cs *chainSync) logSyncSpeed() { } select { - case <-t.C: - if cs.ctx.Err() != nil { - return - } + case <-cs.logSyncTickerC: // channel of cs.logSyncTicker case <-cs.ctx.Done(): return } diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index ae0e027692..df1a309641 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -1118,7 +1118,7 @@ func Test_chainSync_logSyncSpeed(t *testing.T) { type fields struct { blockStateBuilder func(ctrl *gomock.Controller) BlockState - networkBuilder func(ctrl *gomock.Controller, done chan struct{}) Network + networkBuilder func(ctrl *gomock.Controller) Network state chainSyncState benchmarker *syncBenchmarker } @@ -1131,16 +1131,13 @@ func Test_chainSync_logSyncSpeed(t *testing.T) { fields: fields{ blockStateBuilder: func(ctrl *gomock.Controller) BlockState { mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) return mockBlockState }, - networkBuilder: func(ctrl *gomock.Controller, done chan struct{}) Network { + networkBuilder: func(ctrl *gomock.Controller) Network { mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().DoAndReturn(func() error { - close(done) - return nil - }) + mockNetwork.EXPECT().Peers().Return(nil) return mockNetwork }, benchmarker: newSyncBenchmarker(10), @@ -1152,16 +1149,13 @@ func Test_chainSync_logSyncSpeed(t *testing.T) { fields: fields{ blockStateBuilder: func(ctrl *gomock.Controller) BlockState { mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) return mockBlockState }, - networkBuilder: func(ctrl *gomock.Controller, done chan struct{}) Network { + 
networkBuilder: func(ctrl *gomock.Controller) Network { mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().DoAndReturn(func() error { - close(done) - return nil - }) + mockNetwork.EXPECT().Peers().Return(nil) return mockNetwork }, benchmarker: newSyncBenchmarker(10), @@ -1175,19 +1169,24 @@ func Test_chainSync_logSyncSpeed(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) ctx, cancel := context.WithCancel(context.Background()) - done := make(chan struct{}) + tickerChannel := make(chan time.Time) cs := &chainSync{ - ctx: ctx, - cancel: cancel, - blockState: tt.fields.blockStateBuilder(ctrl), - network: tt.fields.networkBuilder(ctrl, done), - state: tt.fields.state, - benchmarker: tt.fields.benchmarker, - logSyncPeriod: time.Millisecond, + ctx: ctx, + cancel: cancel, + blockState: tt.fields.blockStateBuilder(ctrl), + network: tt.fields.networkBuilder(ctrl), + state: tt.fields.state, + benchmarker: tt.fields.benchmarker, + logSyncTickerC: tickerChannel, + logSyncTicker: time.NewTicker(time.Hour), // just here to be stopped + logSyncDone: make(chan struct{}), } + go cs.logSyncSpeed() - <-done - cancel() + + tickerChannel <- time.Time{} + cs.cancel() + <-cs.logSyncDone }) } } @@ -1197,10 +1196,8 @@ func Test_chainSync_start(t *testing.T) { type fields struct { blockStateBuilder func(ctrl *gomock.Controller) BlockState - disjointBlockSetBuilder func(ctrl *gomock.Controller) DisjointBlockSet - networkBuilder func(ctrl *gomock.Controller, done chan struct{}) Network + disjointBlockSetBuilder func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet benchmarker *syncBenchmarker - slotDuration time.Duration } tests := []struct { name string @@ -1211,26 +1208,18 @@ func Test_chainSync_start(t *testing.T) { fields: fields{ blockStateBuilder: func(ctrl *gomock.Controller) BlockState { mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).AnyTimes() + mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil) return mockBlockState }, - disjointBlockSetBuilder: func(ctrl *gomock.Controller) DisjointBlockSet { + disjointBlockSetBuilder: func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet { mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().run(gomock.Any()) + mockDisjointBlockSet.EXPECT().run(gomock.AssignableToTypeOf(make(<-chan struct{}))). + DoAndReturn(func(stop <-chan struct{}) { + close(called) // test glue, ideally we would use a ready chan struct passed to run(). 
+ }) return mockDisjointBlockSet }, - networkBuilder: func(ctrl *gomock.Controller, done chan struct{}) Network { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().DoAndReturn(func() []common.PeerInfo { - close(done) - return nil - }) - return mockNetwork - }, - slotDuration: defaultSlotDuration, - benchmarker: newSyncBenchmarker(1), + benchmarker: newSyncBenchmarker(1), }, }, } @@ -1240,19 +1229,19 @@ func Test_chainSync_start(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) ctx, cancel := context.WithCancel(context.Background()) - done := make(chan struct{}) + disjointBlockSetCalled := make(chan struct{}) cs := &chainSync{ ctx: ctx, cancel: cancel, blockState: tt.fields.blockStateBuilder(ctrl), - pendingBlocks: tt.fields.disjointBlockSetBuilder(ctrl), - network: tt.fields.networkBuilder(ctrl, done), + pendingBlocks: tt.fields.disjointBlockSetBuilder(ctrl, disjointBlockSetCalled), benchmarker: tt.fields.benchmarker, - slotDuration: tt.fields.slotDuration, - logSyncPeriod: time.Second, + slotDuration: time.Hour, + logSyncTicker: time.NewTicker(time.Hour), // just here to be closed + logSyncDone: make(chan struct{}), } cs.start() - <-done + <-disjointBlockSetCalled cs.stop() }) } From 1fa1c65d710fb98718fc0d04a9c3598d09f41e06 Mon Sep 17 00:00:00 2001 From: Kishan Sagathiya Date: Mon, 20 Jun 2022 15:11:36 +0530 Subject: [PATCH 13/48] handle the case when epoch data is nil (#2590) Fixes #2584 --- dot/state/epoch.go | 35 ++++++++++++++++++++++++---------- lib/babe/verify.go | 2 ++ pkg/scale/varying_data_type.go | 2 +- 3 files changed, 28 insertions(+), 11 deletions(-) diff --git a/dot/state/epoch.go b/dot/state/epoch.go index c53ed7f799..d8f02ed2c7 100644 --- a/dot/state/epoch.go +++ b/dot/state/epoch.go @@ -17,9 +17,11 @@ import ( ) var ( - ErrEpochNotInMemory = errors.New("epoch not found in memory map") - errHashNotInMemory = errors.New("hash not found in memory map") - errHashNotPersisted = errors.New("hash with next epoch not found in database") + ErrEpochNotInMemory = errors.New("epoch not found in memory map") + errHashNotInMemory = errors.New("hash not found in memory map") + errEpochDataNotFound = errors.New("epoch data not found in the database") + errHashNotPersisted = errors.New("hash with next epoch not found in database") + errNoPreRuntimeDigest = errors.New("header does not contain pre-runtime digest") ) var ( @@ -225,7 +227,7 @@ func (s *EpochState) GetEpochForBlock(header *types.Header) (uint64, error) { return (slotNumber - firstSlot) / s.epochLength, nil } - return 0, errors.New("header does not contain pre-runtime digest") + return 0, errNoPreRuntimeDigest } // SetEpochData sets the epoch data for a given epoch @@ -251,14 +253,19 @@ func (s *EpochState) GetEpochData(epoch uint64, header *types.Header) (*types.Ep if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { return nil, fmt.Errorf("failed to get epoch data from database: %w", err) - } else if header == nil { - // if no header is given then skip the lookup in-memory - return epochData, nil } - epochData, err = s.getEpochDataFromMemory(epoch, header) - if err != nil { - return nil, fmt.Errorf("failed to get epoch data from memory: %w", err) + // lookup in-memory only if header is given + if header != nil && errors.Is(err, chaindb.ErrKeyNotFound) { + epochData, err = s.getEpochDataFromMemory(epoch, header) + if err != nil { + return nil, fmt.Errorf("failed to get epoch data from memory: %w", err) + } + } + + if epochData == nil { + return nil, fmt.Errorf("%w: for epoch %d and header 
with hash %s", + errEpochDataNotFound, epoch, header.Hash()) } return epochData, nil @@ -543,6 +550,10 @@ func (s *EpochState) StoreBABENextConfigData(epoch uint64, hash common.Hash, nex // check if the header is in the database then it's been finalized and // thus we can also set the corresponding EpochData in the database func (s *EpochState) FinalizeBABENextEpochData(finalizedHeader *types.Header) error { + if finalizedHeader.Number == 0 { + return nil + } + s.nextEpochDataLock.Lock() defer s.nextEpochDataLock.Unlock() @@ -600,6 +611,10 @@ func (s *EpochState) FinalizeBABENextEpochData(finalizedHeader *types.Header) er // check if the header is in the database then it's been finalized and // thus we can also set the corresponding NextConfigData in the database func (s *EpochState) FinalizeBABENextConfigData(finalizedHeader *types.Header) error { + if finalizedHeader.Number == 0 { + return nil + } + s.nextConfigDataLock.Lock() defer s.nextConfigDataLock.Unlock() diff --git a/lib/babe/verify.go b/lib/babe/verify.go index add58392a4..de07b62abc 100644 --- a/lib/babe/verify.go +++ b/lib/babe/verify.go @@ -341,6 +341,7 @@ func (b *verifier) verifyAuthorshipRight(header *types.Header) error { } // check if the producer has equivocated, ie. have they produced a conflicting block? + // hashes is hashes of all blocks with same block number as header.Number hashes := b.blockState.GetAllBlocksAtDepth(header.ParentHash) for _, hash := range hashes { @@ -364,6 +365,7 @@ func (b *verifier) verifyAuthorshipRight(header *types.Header) error { existingBlockProducerIndex = d.AuthorityIndex } + // same authority won't produce two different blocks at the same block number if currentBlockProducerIndex == existingBlockProducerIndex && hash != header.Hash() { return ErrProducerEquivocated } diff --git a/pkg/scale/varying_data_type.go b/pkg/scale/varying_data_type.go index 137735d5bd..9f6cfeeaa8 100644 --- a/pkg/scale/varying_data_type.go +++ b/pkg/scale/varying_data_type.go @@ -12,7 +12,7 @@ type VaryingDataTypeValue interface { Index() uint } -// VaryingDataTypeSlice is used to represent []VaryingDataType. SCALE requires knowledge +// VaryingDataTypeSlice is used to represent []VaryingDataType. 
SCALE requires knowledge // of the underlying data, so it is required to have the VaryingDataType required for decoding type VaryingDataTypeSlice struct { VaryingDataType From 78863183c5f8868b0735f28c81045b1ee0f1e816 Mon Sep 17 00:00:00 2001 From: Edward Mack Date: Mon, 20 Jun 2022 11:29:02 -0400 Subject: [PATCH 14/48] fix(trie): Panic when deleting nonexistent keys from trie (GSR-10) (#2609) * add check for common key length * add test case for nonexistent key --- lib/trie/trie.go | 4 ++++ lib/trie/trie_test.go | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/lib/trie/trie.go b/lib/trie/trie.go index 988afa3262..8f594e4180 100644 --- a/lib/trie/trie.go +++ b/lib/trie/trie.go @@ -969,6 +969,10 @@ func (t *Trie) deleteBranch(branch *Node, key []byte) ( } commonPrefixLength := lenCommonPrefix(branch.Key, key) + keyDoesNotExist := commonPrefixLength == len(key) + if keyDoesNotExist { + return branch, false, 0 + } childIndex := key[commonPrefixLength] childKey := key[commonPrefixLength+1:] child := branch.Children[childIndex] diff --git a/lib/trie/trie_test.go b/lib/trie/trie_test.go index e05587477b..6069af82f1 100644 --- a/lib/trie/trie_test.go +++ b/lib/trie/trie_test.go @@ -3573,6 +3573,40 @@ func Test_Trie_delete(t *testing.T) { }, updated: true, }, + "handle nonexistent key (no op)": { + trie: Trie{ + generation: 1, + }, + parent: &Node{ + Key: []byte{1, 0, 2, 3}, + Descendants: 1, + Children: padRightChildren([]*Node{ + { // full key 1, 0, 2 + Key: []byte{2}, + Value: []byte{1}, + }, + { // full key 1, 1, 2 + Key: []byte{2}, + Value: []byte{2}, + }, + }), + }, + key: []byte{1, 0, 2}, + newParent: &Node{ + Key: []byte{1, 0, 2, 3}, + Descendants: 1, + Children: padRightChildren([]*Node{ + { // full key 1, 0, 2 + Key: []byte{2}, + Value: []byte{1}, + }, + { // full key 1, 1, 2 + Key: []byte{2}, + Value: []byte{2}, + }, + }), + }, + }, } for name, testCase := range testCases { From 1072888bb936239f6cd362ede9bc49a02898e43d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Tue, 21 Jun 2022 08:09:37 -0400 Subject: [PATCH 15/48] fix(lib/babe): check if authority index is in the `authorities` range (#2601) --- lib/babe/verify.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/babe/verify.go b/lib/babe/verify.go index de07b62abc..671114ef2d 100644 --- a/lib/babe/verify.go +++ b/lib/babe/verify.go @@ -390,7 +390,7 @@ func (b *verifier) verifyPreRuntimeDigest(digest *types.PreRuntimeDigest) (scale authIdx = d.AuthorityIndex } - if len(b.authorities) <= int(authIdx) { + if uint64(len(b.authorities)) <= uint64(authIdx) { logger.Tracef("verifyPreRuntimeDigest invalid auth index %d, we have %d auths", authIdx, len(b.authorities)) return nil, ErrInvalidBlockProducerIndex From 81cf2de7c2d1aaa865ee62e498dc5a0793b476f0 Mon Sep 17 00:00:00 2001 From: Edward Mack Date: Tue, 21 Jun 2022 11:06:12 -0400 Subject: [PATCH 16/48] rename integeration to correct integration (#2616) --- ...sync_integeration_test.go => chain_sync_integration_test.go} | 0 ...egeration_test.go => disjoint_block_set_integration_test.go} | 0 ...message_integeration_test.go => message_integration_test.go} | 2 +- .../{syncer_integeration_test.go => syncer_integration_test.go} | 0 ...ncer_integeration_test.go => tip_syncer_integration_test.go} | 0 5 files changed, 1 insertion(+), 1 deletion(-) rename dot/sync/{chain_sync_integeration_test.go => chain_sync_integration_test.go} (100%) rename dot/sync/{disjoint_block_set_integeration_test.go => 
disjoint_block_set_integration_test.go} (100%) rename dot/sync/{message_integeration_test.go => message_integration_test.go} (99%) rename dot/sync/{syncer_integeration_test.go => syncer_integration_test.go} (100%) rename dot/sync/{tip_syncer_integeration_test.go => tip_syncer_integration_test.go} (100%) diff --git a/dot/sync/chain_sync_integeration_test.go b/dot/sync/chain_sync_integration_test.go similarity index 100% rename from dot/sync/chain_sync_integeration_test.go rename to dot/sync/chain_sync_integration_test.go diff --git a/dot/sync/disjoint_block_set_integeration_test.go b/dot/sync/disjoint_block_set_integration_test.go similarity index 100% rename from dot/sync/disjoint_block_set_integeration_test.go rename to dot/sync/disjoint_block_set_integration_test.go diff --git a/dot/sync/message_integeration_test.go b/dot/sync/message_integration_test.go similarity index 99% rename from dot/sync/message_integeration_test.go rename to dot/sync/message_integration_test.go index 3db0809306..62475f3ce6 100644 --- a/dot/sync/message_integeration_test.go +++ b/dot/sync/message_integration_test.go @@ -364,7 +364,7 @@ func TestService_CreateBlockResponse_Descending_EndHash(t *testing.T) { require.Equal(t, uint(1), resp.BlockData[127].Number()) } -func TestService_checkOrGetDescendantHash_integeration(t *testing.T) { +func TestService_checkOrGetDescendantHash_integration(t *testing.T) { t.Parallel() s := newTestSyncer(t) branches := map[uint]int{ diff --git a/dot/sync/syncer_integeration_test.go b/dot/sync/syncer_integration_test.go similarity index 100% rename from dot/sync/syncer_integeration_test.go rename to dot/sync/syncer_integration_test.go diff --git a/dot/sync/tip_syncer_integeration_test.go b/dot/sync/tip_syncer_integration_test.go similarity index 100% rename from dot/sync/tip_syncer_integeration_test.go rename to dot/sync/tip_syncer_integration_test.go From 914a747a1c1e4d263b92f33008f6341997f84b0b Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Thu, 23 Jun 2022 11:49:59 -0400 Subject: [PATCH 17/48] feat(pkg/scale): support for custom `VaryingDataType` types (#2612) * wip decode functionality * wip encode custom vdt * refactor tests * more tests, fix decode for nested vdt * update readme, provide example of nested VDTs * add copyright * fix lint --- pkg/scale/README.md | 196 ++++++++++++++++- pkg/scale/decode.go | 27 ++- pkg/scale/encode.go | 12 +- .../varying_data_type_nested_example_test.go | 186 ++++++++++++++++ pkg/scale/varying_data_type_nested_test.go | 199 ++++++++++++++++++ pkg/scale/varying_data_type_test.go | 49 ++++- 6 files changed, 658 insertions(+), 11 deletions(-) create mode 100644 pkg/scale/varying_data_type_nested_example_test.go create mode 100644 pkg/scale/varying_data_type_nested_test.go diff --git a/pkg/scale/README.md b/pkg/scale/README.md index 6e97e6b1eb..283f3492c8 100644 --- a/pkg/scale/README.md +++ b/pkg/scale/README.md @@ -82,7 +82,7 @@ SCALE uses a compact encoding for variable width unsigned integers. ### Basic Example Basic example which encodes and decodes a `uint`. -``` +```go import ( "fmt" "github.com/ChainSafe/gossamer/pkg/scale" @@ -111,7 +111,7 @@ func ExampleBasic() { Use the `scale` struct tag for struct fields to conform to specific encoding sequence of struct field values. A struct tag of `"-"` will be omitted from encoding and decoding. 
-```
+```go
 import (
 	"fmt"
 	"github.com/ChainSafe/gossamer/pkg/scale"
 )
@@ -159,7 +159,7 @@ result := scale.NewResult(int32(0), int32(0)
 result.Set(scale.Ok, 10)
 ```
 
-```
+```go
 import (
 	"fmt"
 	"github.com/ChainSafe/gossamer/pkg/scale"
 )
@@ -213,7 +213,7 @@
 A `VaryingDataType` is analogous to a Rust enum. A `VaryingDataType` needs to be constructed using the `NewVaryingDataType` constructor. `VaryingDataTypeValue` is an interface with one `Index() uint` method that needs to be implemented. The returned `uint` index should be unique per type and needs to be the same index as defined in the Rust enum to ensure interoperability. To set the value of the `VaryingDataType`, the `VaryingDataType.Set()` function should be called with an associated `VaryingDataTypeValue`.
 
-```
+```go
 import (
 	"fmt"
 	"github.com/ChainSafe/gossamer/pkg/scale"
 )
@@ -323,4 +323,192 @@ func ExampleVaryingDataTypeSlice() {
 		panic(fmt.Errorf("uh oh: %+v %+v", vdts, vdts1))
 	}
 }
+```
+
+#### Nested VaryingDataType
+
+See `varying_data_type_nested_example.go` for a working example of a custom `VaryingDataType` with another custom `VaryingDataType` as a value of the parent `VaryingDataType`. In the case of nested `VaryingDataTypes`, a custom type needs to be created for the child `VaryingDataType` because it needs to fulfill the `VaryingDataTypeValue` interface.
+
+```go
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/ChainSafe/gossamer/pkg/scale"
+)
+
+// ParentVDT is a VaryingDataType that consists of multiple nested VaryingDataType
+// instances (aka. a rust enum containing multiple enum options)
+type ParentVDT scale.VaryingDataType
+
+// Set will set a VaryingDataTypeValue using the underlying VaryingDataType
+func (pvdt *ParentVDT) Set(val scale.VaryingDataTypeValue) (err error) {
+	// cast to VaryingDataType to use VaryingDataType.Set method
+	vdt := scale.VaryingDataType(*pvdt)
+	err = vdt.Set(val)
+	if err != nil {
+		return
+	}
+	// store original ParentVDT with VaryingDataType that has been set
+	*pvdt = ParentVDT(vdt)
+	return
+}
+
+// Value will return value from underlying VaryingDataType
+func (pvdt *ParentVDT) Value() (val scale.VaryingDataTypeValue) {
+	vdt := scale.VaryingDataType(*pvdt)
+	return vdt.Value()
+}
+
+// NewParentVDT is constructor for ParentVDT
+func NewParentVDT() ParentVDT {
+	// use standard VaryingDataType constructor to construct a VaryingDataType
+	vdt, err := scale.NewVaryingDataType(NewChildVDT(), NewOtherChildVDT())
+	if err != nil {
+		panic(err)
+	}
+	// cast to ParentVDT
+	return ParentVDT(vdt)
+}
+
+// ChildVDT type is used as a VaryingDataTypeValue for ParentVDT
+type ChildVDT scale.VaryingDataType
+
+// Index fulfills the VaryingDataTypeValue interface.
+func (cvdt ChildVDT) Index() uint {
+	return 1
+}
+
+// Set will set a VaryingDataTypeValue using the underlying VaryingDataType
+func (cvdt *ChildVDT) Set(val scale.VaryingDataTypeValue) (err error) {
+	// cast to VaryingDataType to use VaryingDataType.Set method
+	vdt := scale.VaryingDataType(*cvdt)
+	err = vdt.Set(val)
+	if err != nil {
+		return
+	}
+	// store original ChildVDT with VaryingDataType that has been set
+	*cvdt = ChildVDT(vdt)
+	return
+}
+
+// Value will return value from underlying VaryingDataType
+func (cvdt *ChildVDT) Value() (val scale.VaryingDataTypeValue) {
+	vdt := scale.VaryingDataType(*cvdt)
+	return vdt.Value()
+}
+
+// NewChildVDT is constructor for ChildVDT
+func NewChildVDT() ChildVDT {
+	// use standard VaryingDataType constructor to construct a VaryingDataType
+	// constrained to types ChildInt16, ChildStruct, and ChildString
+	vdt, err := scale.NewVaryingDataType(ChildInt16(0), ChildStruct{}, ChildString(""))
+	if err != nil {
+		panic(err)
+	}
+	// cast to ChildVDT
+	return ChildVDT(vdt)
+}
+
+// OtherChildVDT type is used as a VaryingDataTypeValue for ParentVDT
+type OtherChildVDT scale.VaryingDataType
+
+// Index fulfills the VaryingDataTypeValue interface.
+func (ocvdt OtherChildVDT) Index() uint {
+	return 2
+}
+
+// Set will set a VaryingDataTypeValue using the underlying VaryingDataType
+func (cvdt *OtherChildVDT) Set(val scale.VaryingDataTypeValue) (err error) {
+	// cast to VaryingDataType to use VaryingDataType.Set method
+	vdt := scale.VaryingDataType(*cvdt)
+	err = vdt.Set(val)
+	if err != nil {
+		return
+	}
+	// store original OtherChildVDT with VaryingDataType that has been set
+	*cvdt = OtherChildVDT(vdt)
+	return
+}
+
+// NewOtherChildVDT is constructor for OtherChildVDT
+func NewOtherChildVDT() OtherChildVDT {
+	// use standard VaryingDataType constructor to construct a VaryingDataType
+	// constrained to types ChildInt16, ChildStruct, and ChildString
+	vdt, err := scale.NewVaryingDataType(ChildInt16(0), ChildStruct{}, ChildString(""))
+	if err != nil {
+		panic(err)
+	}
+	// cast to OtherChildVDT
+	return OtherChildVDT(vdt)
+}
+
+// ChildInt16 is used as a VaryingDataTypeValue for ChildVDT and OtherChildVDT
+type ChildInt16 int16
+
+// Index fulfills the VaryingDataTypeValue interface.
The ChildVDT type is used as a +// VaryingDataTypeValue for ParentVDT +func (ci ChildInt16) Index() uint { + return 1 +} + +// ChildStruct is used as a VaryingDataTypeValue for ChildVDT and OtherChildVDT +type ChildStruct struct { + A string + B bool +} + +// Index fulfills the VaryingDataTypeValue interface +func (cs ChildStruct) Index() uint { + return 2 +} + +// ChildString is used as a VaryingDataTypeValue for ChildVDT and OtherChildVDT +type ChildString string + +// Index fulfills the VaryingDataTypeValue interface +func (cs ChildString) Index() uint { + return 3 +} + +func ExampleNestedVaryingDataType() { + parent := NewParentVDT() + + // populate parent with ChildVDT + child := NewChildVDT() + child.Set(ChildInt16(888)) + err := parent.Set(child) + if err != nil { + panic(err) + } + + // validate ParentVDT.Value() + fmt.Printf("parent.Value(): %+v\n", parent.Value()) + // should cast to ChildVDT, since that was set earlier + valChildVDT := parent.Value().(ChildVDT) + // validate ChildVDT.Value() as ChildInt16(888) + fmt.Printf("child.Value(): %+v\n", valChildVDT.Value()) + + // marshal into scale encoded bytes + bytes, err := scale.Marshal(parent) + if err != nil { + panic(err) + } + fmt.Printf("bytes: % x\n", bytes) + + // unmarshal into another ParentVDT + dstParent := NewParentVDT() + err = scale.Unmarshal(bytes, &dstParent) + if err != nil { + panic(err) + } + // assert both ParentVDT instances are the same + fmt.Println(reflect.DeepEqual(parent, dstParent)) + + // Output: + // parent.Value(): {value:888 cache:map[1:0 2:{A: B:false} 3:]} + // child.Value(): 888 + // bytes: 01 01 78 03 + // true +} ``` \ No newline at end of file diff --git a/pkg/scale/decode.go b/pkg/scale/decode.go index 73e19c85d1..4e427e1b35 100644 --- a/pkg/scale/decode.go +++ b/pkg/scale/decode.go @@ -149,7 +149,12 @@ func (ds *decodeState) unmarshal(dstv reflect.Value) (err error) { case reflect.Ptr: err = ds.decodePointer(dstv) case reflect.Struct: - err = ds.decodeStruct(dstv) + ok := reflect.ValueOf(in).CanConvert(reflect.TypeOf(VaryingDataType{})) + if ok { + err = ds.decodeCustomVaryingDataType(dstv) + } else { + err = ds.decodeStruct(dstv) + } case reflect.Array: err = ds.decodeArray(dstv) case reflect.Slice: @@ -344,6 +349,19 @@ func (ds *decodeState) decodeVaryingDataTypeSlice(dstv reflect.Value) (err error return } +func (ds *decodeState) decodeCustomVaryingDataType(dstv reflect.Value) (err error) { + initialType := dstv.Type() + converted := dstv.Convert(reflect.TypeOf(VaryingDataType{})) + tempVal := reflect.New(converted.Type()) + tempVal.Elem().Set(converted) + err = ds.decodeVaryingDataType(tempVal.Elem()) + if err != nil { + return + } + dstv.Set(tempVal.Elem().Convert(initialType)) + return +} + func (ds *decodeState) decodeVaryingDataType(dstv reflect.Value) (err error) { var b byte b, err = ds.ReadByte() @@ -358,12 +376,13 @@ func (ds *decodeState) decodeVaryingDataType(dstv reflect.Value) (err error) { return } - tempVal := reflect.New(reflect.TypeOf(val)).Elem() - err = ds.unmarshal(tempVal) + tempVal := reflect.New(reflect.TypeOf(val)) + tempVal.Elem().Set(reflect.ValueOf(val)) + err = ds.unmarshal(tempVal.Elem()) if err != nil { return } - err = vdt.Set(tempVal.Interface().(VaryingDataTypeValue)) + err = vdt.Set(tempVal.Elem().Interface().(VaryingDataTypeValue)) if err != nil { return } diff --git a/pkg/scale/encode.go b/pkg/scale/encode.go index 03f019b988..9074d03aa2 100644 --- a/pkg/scale/encode.go +++ b/pkg/scale/encode.go @@ -73,7 +73,12 @@ func (es *encodeState) marshal(in 
interface{}) (err error) {
 			err = es.marshal(elem.Interface())
 		}
 	case reflect.Struct:
-		err = es.encodeStruct(in)
+		ok := reflect.ValueOf(in).CanConvert(reflect.TypeOf(VaryingDataType{}))
+		if ok {
+			err = es.encodeCustomVaryingDataType(in)
+		} else {
+			err = es.encodeStruct(in)
+		}
 	case reflect.Array:
 		err = es.encodeArray(in)
 	case reflect.Slice:
@@ -148,6 +153,11 @@ func (es *encodeState) encodeResult(res Result) (err error) {
 	return
 }
 
+func (es *encodeState) encodeCustomVaryingDataType(in interface{}) (err error) {
+	vdt := reflect.ValueOf(in).Convert(reflect.TypeOf(VaryingDataType{})).Interface().(VaryingDataType)
+	return es.encodeVaryingDataType(vdt)
+}
+
 func (es *encodeState) encodeVaryingDataType(vdt VaryingDataType) (err error) {
 	err = es.WriteByte(byte(vdt.value.Index()))
 	if err != nil {
diff --git a/pkg/scale/varying_data_type_nested_example_test.go b/pkg/scale/varying_data_type_nested_example_test.go
new file mode 100644
index 0000000000..54e1e67530
--- /dev/null
+++ b/pkg/scale/varying_data_type_nested_example_test.go
@@ -0,0 +1,186 @@
+// Copyright 2021 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package scale_test
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/ChainSafe/gossamer/pkg/scale"
+)
+
+// ParentVDT is a VaryingDataType that consists of multiple nested VaryingDataType
+// instances (aka. a rust enum containing multiple enum options)
+type ParentVDT scale.VaryingDataType
+
+// Set will set a VaryingDataTypeValue using the underlying VaryingDataType
+func (pvdt *ParentVDT) Set(val scale.VaryingDataTypeValue) (err error) {
+	// cast to VaryingDataType to use VaryingDataType.Set method
+	vdt := scale.VaryingDataType(*pvdt)
+	err = vdt.Set(val)
+	if err != nil {
+		return
+	}
+	// store original ParentVDT with VaryingDataType that has been set
+	*pvdt = ParentVDT(vdt)
+	return
+}
+
+// Value will return value from underlying VaryingDataType
+func (pvdt *ParentVDT) Value() (val scale.VaryingDataTypeValue) {
+	vdt := scale.VaryingDataType(*pvdt)
+	return vdt.Value()
+}
+
+// NewParentVDT is constructor for ParentVDT
+func NewParentVDT() ParentVDT {
+	// use standard VaryingDataType constructor to construct a VaryingDataType
+	vdt, err := scale.NewVaryingDataType(NewChildVDT(), NewOtherChildVDT())
+	if err != nil {
+		panic(err)
+	}
+	// cast to ParentVDT
+	return ParentVDT(vdt)
+}
+
+// ChildVDT type is used as a VaryingDataTypeValue for ParentVDT
+type ChildVDT scale.VaryingDataType
+
+// Index fulfils the VaryingDataTypeValue interface.
+func (cvdt ChildVDT) Index() uint {
+	return 1
+}
+
+// Set will set a VaryingDataTypeValue using the underlying VaryingDataType
+func (cvdt *ChildVDT) Set(val scale.VaryingDataTypeValue) (err error) {
+	// cast to VaryingDataType to use VaryingDataType.Set method
+	vdt := scale.VaryingDataType(*cvdt)
+	err = vdt.Set(val)
+	if err != nil {
+		return
+	}
+	// store original ChildVDT with VaryingDataType that has been set
+	*cvdt = ChildVDT(vdt)
+	return
+}
+
+// Value will return value from underlying VaryingDataType
+func (cvdt *ChildVDT) Value() (val scale.VaryingDataTypeValue) {
+	vdt := scale.VaryingDataType(*cvdt)
+	return vdt.Value()
+}
+
+// NewChildVDT is constructor for ChildVDT
+func NewChildVDT() ChildVDT {
+	// use standard VaryingDataType constructor to construct a VaryingDataType
+	// constrained to types ChildInt16, ChildStruct, and ChildString
+	vdt, err := scale.NewVaryingDataType(ChildInt16(0), ChildStruct{}, ChildString(""))
+	if err != nil {
+		panic(err)
+	}
+	// cast to ChildVDT
+	return ChildVDT(vdt)
+}
+
+// OtherChildVDT type is used as a VaryingDataTypeValue for ParentVDT
+type OtherChildVDT scale.VaryingDataType
+
+// Index fulfils the VaryingDataTypeValue interface.
+func (ocvdt OtherChildVDT) Index() uint {
+	return 2
+}
+
+// Set will set a VaryingDataTypeValue using the underlying VaryingDataType
+func (cvdt *OtherChildVDT) Set(val scale.VaryingDataTypeValue) (err error) { //nolint:revive
+	// cast to VaryingDataType to use VaryingDataType.Set method
+	vdt := scale.VaryingDataType(*cvdt)
+	err = vdt.Set(val)
+	if err != nil {
+		return
+	}
+	// store original OtherChildVDT with VaryingDataType that has been set
+	*cvdt = OtherChildVDT(vdt)
+	return
+}
+
+// NewOtherChildVDT is constructor for OtherChildVDT
+func NewOtherChildVDT() OtherChildVDT {
+	// use standard VaryingDataType constructor to construct a VaryingDataType
+	// constrained to types ChildInt16, ChildStruct, and ChildString
+	vdt, err := scale.NewVaryingDataType(ChildInt16(0), ChildStruct{}, ChildString(""))
+	if err != nil {
+		panic(err)
+	}
+	// cast to OtherChildVDT
+	return OtherChildVDT(vdt)
+}
+
+// ChildInt16 is used as a VaryingDataTypeValue for ChildVDT and OtherChildVDT
+type ChildInt16 int16
+
+// Index fulfils the VaryingDataTypeValue interface.
The ChildVDT type is used as a +// VaryingDataTypeValue for ParentVDT +func (ci ChildInt16) Index() uint { + return 1 +} + +// ChildStruct is used as a VaryingDataTypeValue for ChildVDT and OtherChildVDT +type ChildStruct struct { + A string + B bool +} + +// Index fulfils the VaryingDataTypeValue interface +func (cs ChildStruct) Index() uint { + return 2 +} + +// ChildString is used as a VaryingDataTypeValue for ChildVDT and OtherChildVDT +type ChildString string + +// Index fulfils the VaryingDataTypeValue interface +func (cs ChildString) Index() uint { + return 3 +} + +func Example() { + parent := NewParentVDT() + + // populate parent with ChildVDT + child := NewChildVDT() + child.Set(ChildInt16(888)) + err := parent.Set(child) + if err != nil { + panic(err) + } + + // validate ParentVDT.Value() + fmt.Printf("parent.Value(): %+v\n", parent.Value()) + // should cast to ChildVDT, since that was set earlier + valChildVDT := parent.Value().(ChildVDT) + // validate ChildVDT.Value() as ChildInt16(888) + fmt.Printf("child.Value(): %+v\n", valChildVDT.Value()) + + // marshal into scale encoded bytes + bytes, err := scale.Marshal(parent) + if err != nil { + panic(err) + } + fmt.Printf("bytes: % x\n", bytes) + + // unmarshal into another ParentVDT + dstParent := NewParentVDT() + err = scale.Unmarshal(bytes, &dstParent) + if err != nil { + panic(err) + } + // assert both ParentVDT instances are the same + fmt.Println(reflect.DeepEqual(parent, dstParent)) + + // Output: + // parent.Value(): {value:888 cache:map[1:0 2:{A: B:false} 3:]} + // child.Value(): 888 + // bytes: 01 01 78 03 + // true +} diff --git a/pkg/scale/varying_data_type_nested_test.go b/pkg/scale/varying_data_type_nested_test.go new file mode 100644 index 0000000000..dec5dc2413 --- /dev/null +++ b/pkg/scale/varying_data_type_nested_test.go @@ -0,0 +1,199 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package scale + +import ( + "math/big" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +) + +type parentVDT VaryingDataType + +func (pvdt *parentVDT) Set(val VaryingDataTypeValue) (err error) { + vdt := VaryingDataType(*pvdt) + err = vdt.Set(val) + if err != nil { + return + } + *pvdt = parentVDT(vdt) + return +} + +func mustNewParentVDT() parentVDT { + vdt, err := NewVaryingDataType(mustNewChildVDT(), mustNewChildVDT1()) + if err != nil { + panic(err) + } + return parentVDT(vdt) +} + +type childVDT VaryingDataType + +func (cvdt childVDT) Index() uint { + return 1 +} + +func mustNewChildVDT() childVDT { + vdt, err := NewVaryingDataType(VDTValue{}, VDTValue1{}, VDTValue2{}, VDTValue3(0)) + if err != nil { + panic(err) + } + return childVDT(vdt) +} + +func mustNewChildVDTAndSet(vdtv VaryingDataTypeValue) childVDT { + vdt, err := NewVaryingDataType(VDTValue{}, VDTValue1{}, VDTValue2{}, VDTValue3(0)) + if err != nil { + panic(err) + } + err = vdt.Set(vdtv) + if err != nil { + panic(err) + } + return childVDT(vdt) +} + +type childVDT1 VaryingDataType + +func (cvdt childVDT1) Index() uint { + return 2 +} + +func mustNewChildVDT1() childVDT1 { + vdt, err := NewVaryingDataType(VDTValue{}, VDTValue1{}, VDTValue2{}) + if err != nil { + panic(err) + } + return childVDT1(vdt) +} + +func mustNewChildVDT1AndSet(vdtv VaryingDataTypeValue) childVDT1 { + vdt, err := NewVaryingDataType(VDTValue{}, VDTValue1{}, VDTValue2{}) + if err != nil { + panic(err) + } + err = vdt.Set(vdtv) + if err != nil { + panic(err) + } + return childVDT1(vdt) +} + +type constructorTest struct { 
+ name string + newIn func(t *testing.T) interface{} + want []byte + wantErr bool +} + +var nestedVaryingDataTypeTests = []constructorTest{ + { + name: "ParentVDT with ChildVDT", + newIn: func(t *testing.T) interface{} { + pvdt := mustNewParentVDT() + err := pvdt.Set(mustNewChildVDTAndSet(VDTValue3(16383))) + if err != nil { + t.Fatalf("%v", err) + } + return pvdt + }, + want: newWant( + // index of childVDT + []byte{1}, + // index of VDTValue3 + []byte{4}, + // encoding of int16 + []byte{0xff, 0x3f}, + ), + }, + { + name: "ParentVDT with ChildVDT1", + newIn: func(t *testing.T) interface{} { + pvdt := mustNewParentVDT() + err := pvdt.Set(mustNewChildVDT1AndSet( + VDTValue{ + A: big.NewInt(1073741823), + B: int(1073741823), + C: uint(1073741823), + D: int8(1), + E: uint8(1), + F: int16(16383), + G: uint16(16383), + H: int32(1073741823), + I: uint32(1073741823), + J: int64(9223372036854775807), + K: uint64(9223372036854775807), + L: byteArray(64), + M: testStrings[1], + N: true, + }, + )) + if err != nil { + t.Fatalf("%v", err) + } + return pvdt + }, + want: newWant( + // index of childVDT1 + []byte{2}, + // index of VDTValue + []byte{1}, + // encoding of struct + []byte{ + 0xfe, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xff, + 0x01, + 0x01, + 0xff, 0x3f, + 0xff, 0x3f, + 0xff, 0xff, 0xff, 0x3f, + 0xff, 0xff, 0xff, 0x3f, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + }, + append([]byte{0x01, 0x01}, byteArray(64)...), + append([]byte{0xC2, 0x02, 0x01, 0x00}, testStrings[1]...), + []byte{0x01}, + ), + }, +} + +func Test_encodeState_encodeCustomVaryingDataType_nested(t *testing.T) { + for _, tt := range nestedVaryingDataTypeTests { + t.Run(tt.name, func(t *testing.T) { + b, err := Marshal(tt.newIn(t)) + if (err != nil) != tt.wantErr { + t.Errorf("Marshal() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(b, tt.want); diff != "" { + t.Errorf("Marshal() diff: %s", diff) + } + }) + } +} + +func Test_decodeState_decodeCustomVaryingDataType_nested(t *testing.T) { + for _, tt := range nestedVaryingDataTypeTests { + t.Run(tt.name, func(t *testing.T) { + dst := mustNewParentVDT() + if err := Unmarshal(tt.want, &dst); (err != nil) != tt.wantErr { + t.Errorf("decodeState.unmarshal() error = %v, wantErr %v", err, tt.wantErr) + return + } + expected := tt.newIn(t) + + diff := cmp.Diff(dst, expected, + cmp.AllowUnexported(parentVDT{}, childVDT{}, childVDT1{}), + cmpopts.IgnoreUnexported(big.Int{}, VDTValue2{}, MyStructWithIgnore{}), + ) + if diff != "" { + t.Errorf("decodeState.unmarshal() = %s", diff) + } + }) + } +} diff --git a/pkg/scale/varying_data_type_test.go b/pkg/scale/varying_data_type_test.go index 0a794d4b15..3c7b50d6ba 100644 --- a/pkg/scale/varying_data_type_test.go +++ b/pkg/scale/varying_data_type_test.go @@ -29,6 +29,8 @@ func mustNewVaryingDataTypeAndSet(value VaryingDataTypeValue, values ...VaryingD return } +type customVDT VaryingDataType + type VDTValue struct { A *big.Int B int @@ -294,10 +296,10 @@ func Test_encodeState_encodeVaryingDataType(t *testing.T) { es := &encodeState{fieldScaleIndicesCache: cache} vdt := tt.in.(VaryingDataType) if err := es.marshal(vdt); (err != nil) != tt.wantErr { - t.Errorf("encodeState.encodeStruct() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("encodeState.marshal() error = %v, wantErr %v", err, tt.wantErr) } if !reflect.DeepEqual(es.Buffer.Bytes(), tt.want) { - t.Errorf("encodeState.encodeStruct() = %v, want %v", es.Buffer.Bytes(), tt.want) + 
t.Errorf("encodeState.marshal() = %v, want %v", es.Buffer.Bytes(), tt.want) } }) } @@ -324,6 +326,49 @@ func Test_decodeState_decodeVaryingDataType(t *testing.T) { } } +func Test_encodeState_encodeCustomVaryingDataType(t *testing.T) { + for _, tt := range varyingDataTypeTests { + t.Run(tt.name, func(t *testing.T) { + es := &encodeState{fieldScaleIndicesCache: cache} + vdt := tt.in.(VaryingDataType) + cvdt := customVDT(vdt) + if err := es.marshal(cvdt); (err != nil) != tt.wantErr { + t.Errorf("encodeState.encodeStruct() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(es.Buffer.Bytes(), tt.want) { + t.Errorf("encodeState.encodeStruct() = %v, want %v", es.Buffer.Bytes(), tt.want) + } + }) + } +} +func Test_decodeState_decodeCustomVaryingDataType(t *testing.T) { + for _, tt := range varyingDataTypeTests { + t.Run(tt.name, func(t *testing.T) { + vdt, err := NewVaryingDataType(VDTValue{}, VDTValue1{}, VDTValue2{}, VDTValue3(0)) + if err != nil { + t.Errorf("%v", err) + return + } + dst := customVDT(vdt) + if err := Unmarshal(tt.want, &dst); (err != nil) != tt.wantErr { + t.Errorf("decodeState.unmarshal() error = %v, wantErr %v", err, tt.wantErr) + return + } + + dstVDT := reflect.ValueOf(tt.in).Convert(reflect.TypeOf(VaryingDataType{})).Interface().(VaryingDataType) + inVDT := reflect.ValueOf(tt.in).Convert(reflect.TypeOf(VaryingDataType{})).Interface().(VaryingDataType) + diff := cmp.Diff(dstVDT.Value(), inVDT.Value(), + cmpopts.IgnoreUnexported(big.Int{}, VDTValue2{}, MyStructWithIgnore{})) + if diff != "" { + t.Errorf("decodeState.unmarshal() = %s", diff) + } + if reflect.TypeOf(dst) != reflect.TypeOf(customVDT{}) { + t.Errorf("types mismatch dst: %v expected: %v", reflect.TypeOf(dst), reflect.TypeOf(customVDT{})) + } + }) + } +} + func TestNewVaryingDataType(t *testing.T) { type args struct { values []VaryingDataTypeValue From 4f2173bdec629d2d8d1cb16d52aa930da1b3c1a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 21:03:11 -0400 Subject: [PATCH 18/48] chore(deps): bump github.com/ethereum/go-ethereum (#2614) Bumps [github.com/ethereum/go-ethereum](https://github.com/ethereum/go-ethereum) from 1.10.18 to 1.10.19. - [Release notes](https://github.com/ethereum/go-ethereum/releases) - [Commits](https://github.com/ethereum/go-ethereum/compare/v1.10.18...v1.10.19) --- updated-dependencies: - dependency-name: github.com/ethereum/go-ethereum dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 68 ++-------------------------------------------------------- 2 files changed, 3 insertions(+), 67 deletions(-) diff --git a/go.mod b/go.mod index 887976ac2f..556da902e4 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de github.com/disiqueira/gotree v1.0.0 github.com/docker/docker v20.10.17+incompatible - github.com/ethereum/go-ethereum v1.10.18 + github.com/ethereum/go-ethereum v1.10.19 github.com/fatih/color v1.13.0 github.com/go-playground/validator/v10 v10.11.0 github.com/golang/mock v1.6.0 diff --git a/go.sum b/go.sum index 7cf1ca16d8..c780fc582f 100644 --- a/go.sum +++ b/go.sum @@ -48,9 +48,6 @@ github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIo github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= @@ -138,7 +135,6 @@ github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrf github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -200,7 +196,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -220,8 +215,6 @@ github.com/decred/dcrd/crypto/blake256 
v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= -github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= @@ -245,16 +238,10 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/disiqueira/gotree v1.0.0 h1:en5wk87n7/Jyk6gVME3cx3xN9KmUCstJ1IjHr4Se4To= github.com/disiqueira/gotree v1.0.0/go.mod h1:7CwL+VWsWAU95DovkdRZAtA7YbtHwGk+tLV/kNi8niU= github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -275,13 +262,12 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg= -github.com/ethereum/go-ethereum v1.10.18 h1:hLEd5M+UD0GJWPaROiYMRgZXl6bi5YwoTJSthsx5CZw= -github.com/ethereum/go-ethereum v1.10.18/go.mod h1:RD3NhcSBjZpj3k+SnQq24wBrmnmie78P5R/P62iNBD8= +github.com/ethereum/go-ethereum v1.10.19 h1:EOR5JbL4MD5yeOqv8W2iC1s4NximrTjqFccUz8lyBRA= +github.com/ethereum/go-ethereum v1.10.19/go.mod h1:IJBNMtzKcNHPtllYihy6BL2IgK1u+32JriaTbdt4v+w= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 
h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -297,16 +283,12 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -322,8 +304,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -333,7 +313,6 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod 
h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= @@ -349,7 +328,6 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -392,7 +370,6 @@ github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -430,7 +407,6 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= @@ -454,7 +430,6 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -472,7 +447,6 @@ github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoP github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -513,12 +487,9 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= @@ -626,7 +597,6 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -659,11 +629,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod 
h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= @@ -900,8 +867,6 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= @@ -912,13 +877,10 @@ github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEU github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= @@ -928,7 +890,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= @@ -975,14 +936,11 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -1079,7 +1037,6 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= @@ -1281,7 +1238,6 @@ github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpP github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= @@ -1301,9 +1257,6 @@ github.com/urfave/cli v1.22.9 h1:cv3/KhXGBGjEXLC4bH0sLuJ9BewaAbpk5oyMOveu4pw= github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.1.1/go.mod 
h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vedhavyas/go-subkey v1.0.2 h1:EW6U+1us4k38AtrBfFOEZTpW9FcF/cIUOxw/pHbNNQ0= github.com/vedhavyas/go-subkey v1.0.2/go.mod h1:T9SEs84XZxRULMZLWtIl48s9rBNE7h6GnkqTgJR8+MU= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= @@ -1334,7 +1287,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= @@ -1402,13 +1354,11 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1425,7 +1375,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod 
h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1452,8 +1401,6 @@ golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1498,7 +1445,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1509,10 +1455,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1563,7 +1506,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1597,7 +1539,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1622,7 +1563,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1634,7 +1574,6 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -1643,7 +1582,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1675,7 +1613,6 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191126055441-b0650ceb63d9/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1707,7 +1644,6 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 2a6fa83591d73c75d0e7a8f3c5013ad718ec5898 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Jun 2022 08:36:49 -0400 Subject: [PATCH 19/48] chore(deps): bump github.com/multiformats/go-multiaddr (#2620) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 556da902e4..b84ef78bae 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/libp2p/go-libp2p-discovery v0.5.1 github.com/libp2p/go-libp2p-kad-dht v0.11.1 github.com/libp2p/go-libp2p-peerstore v0.3.0 - github.com/multiformats/go-multiaddr v0.5.0 + github.com/multiformats/go-multiaddr v0.6.0 github.com/nanobox-io/golang-scribble v0.0.0-20190309225732-aa3e7c118975 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/perlin-network/life v0.0.0-20191203030451-05c0e0f7eaea diff --git a/go.sum b/go.sum index c780fc582f..5c5b729787 100644 --- a/go.sum +++ b/go.sum @@ -964,8 +964,8 @@ github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4 github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= -github.com/multiformats/go-multiaddr v0.5.0 h1:i/JuOoVg4szYQ4YEzDGtb2h0o8M7CG/Yq6cGlcjWZpM= -github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug= +github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= +github.com/multiformats/go-multiaddr 
v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= From 1826896e958d08aaf34660473ba30accc322179b Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Wed, 29 Jun 2022 08:43:19 -0400 Subject: [PATCH 20/48] fix(trie): descendants count for clear prefix (#2606) --- lib/trie/trie.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/trie/trie.go b/lib/trie/trie.go index 8f594e4180..bd26ceddfd 100644 --- a/lib/trie/trie.go +++ b/lib/trie/trie.go @@ -877,10 +877,11 @@ func (t *Trie) clearPrefix(parent *Node, prefix []byte) ( return parent, nodesRemoved } - nodesRemoved = 1 + nodesRemoved = 1 + child.Descendants copySettings := node.DefaultCopySettings branch = t.prepBranchForMutation(branch, copySettings) branch.Children[childIndex] = nil + branch.Descendants -= nodesRemoved var branchChildMerged bool newParent, branchChildMerged = handleDeletion(branch, prefix) if branchChildMerged { From f97e0efada949f78fcb55f5d383f8834a37eae17 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Wed, 29 Jun 2022 08:44:21 -0400 Subject: [PATCH 21/48] feat(e2e): build Gossamer on any test run (#2608) --- .../polkadotjs_test/start_polkadotjs_test.go | 5 ++- tests/rpc/rpc_00_test.go | 18 +++++++++-- tests/rpc/rpc_01-system_test.go | 6 ---- tests/rpc/rpc_02-author_test.go | 11 ------- tests/rpc/rpc_03-chain_test.go | 11 ------- tests/rpc/rpc_04-offchain_test.go | 6 ---- tests/rpc/rpc_05-state_test.go | 16 ---------- tests/rpc/rpc_06-engine_test.go | 6 ---- tests/rpc/rpc_07-payment_test.go | 6 ---- tests/rpc/rpc_08-contracts_test.go | 6 ---- tests/rpc/rpc_09-babe_test.go | 6 ---- tests/stress/stress_test.go | 6 ++++ tests/sync/sync_test.go | 3 ++ tests/utils/build.go | 32 +++++++++++++++++++ 14 files changed, 60 insertions(+), 78 deletions(-) create mode 100644 tests/utils/build.go diff --git a/tests/polkadotjs_test/start_polkadotjs_test.go b/tests/polkadotjs_test/start_polkadotjs_test.go index 26e3bf775a..7886a18d2c 100644 --- a/tests/polkadotjs_test/start_polkadotjs_test.go +++ b/tests/polkadotjs_test/start_polkadotjs_test.go @@ -25,9 +25,12 @@ func TestStartGossamerAndPolkadotAPI(t *testing.T) { return } + err := utils.BuildGossamer() + require.NoError(t, err) + const nodePackageManager = "npm" t.Logf("Checking %s is available...", nodePackageManager) - _, err := exec.LookPath(nodePackageManager) + _, err = exec.LookPath(nodePackageManager) if err != nil { t.Fatalf("%s is not available: %s", nodePackageManager, err) } diff --git a/tests/rpc/rpc_00_test.go b/tests/rpc/rpc_00_test.go index 6c4d1e30ff..549333094a 100644 --- a/tests/rpc/rpc_00_test.go +++ b/tests/rpc/rpc_00_test.go @@ -6,16 +6,28 @@ package rpc import ( "context" "fmt" + "os" "testing" "time" + "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/rpc" "github.com/stretchr/testify/require" ) -var ( - rpcSuite = "rpc" -) +func TestMain(m *testing.M) { + if utils.MODE != "rpc" { + fmt.Println("Going to skip RPC suite tests") + os.Exit(0) + } + + err := utils.BuildGossamer() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + os.Exit(m.Run()) +} type testCase struct { description string diff --git a/tests/rpc/rpc_01-system_test.go b/tests/rpc/rpc_01-system_test.go index 
6b386187c7..0a7f22064b 100644 --- a/tests/rpc/rpc_01-system_test.go +++ b/tests/rpc/rpc_01-system_test.go @@ -11,7 +11,6 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/lib/common" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" "github.com/ChainSafe/gossamer/tests/utils/retry" @@ -22,11 +21,6 @@ import ( const peerIDRegex = `^[a-zA-Z0-9]{52}$` func TestSystemRPC(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - const testTimeout = 8 * time.Minute ctx, cancel := context.WithTimeout(context.Background(), testTimeout) diff --git a/tests/rpc/rpc_02-author_test.go b/tests/rpc/rpc_02-author_test.go index 1716eef32e..c73de804a8 100644 --- a/tests/rpc/rpc_02-author_test.go +++ b/tests/rpc/rpc_02-author_test.go @@ -13,7 +13,6 @@ import ( "github.com/centrifuge/go-substrate-rpc-client/v3/scale" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" "github.com/ChainSafe/gossamer/tests/utils/retry" @@ -24,11 +23,6 @@ import ( ) func TestAuthorSubmitExtrinsic(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetDevGenesisSpecPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath @@ -100,11 +94,6 @@ func TestAuthorSubmitExtrinsic(t *testing.T) { } func TestAuthorRPC(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath diff --git a/tests/rpc/rpc_03-chain_test.go b/tests/rpc/rpc_03-chain_test.go index 6f6be2e145..107d17a911 100644 --- a/tests/rpc/rpc_03-chain_test.go +++ b/tests/rpc/rpc_03-chain_test.go @@ -15,7 +15,6 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc/subscription" "github.com/ChainSafe/gossamer/lib/common" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" "github.com/ChainSafe/gossamer/tests/utils/retry" @@ -32,11 +31,6 @@ const ( ) func TestChainRPC(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetDevGenesisSpecPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath @@ -129,11 +123,6 @@ func TestChainRPC(t *testing.T) { } func TestChainSubscriptionRPC(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetDevGenesisSpecPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath diff --git a/tests/rpc/rpc_04-offchain_test.go b/tests/rpc/rpc_04-offchain_test.go index 78ec0b4739..03bc3fdc13 100644 --- a/tests/rpc/rpc_04-offchain_test.go +++ b/tests/rpc/rpc_04-offchain_test.go @@ -8,7 +8,6 @@ import ( "testing" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" ) @@ -16,11 +15,6 @@ import ( func TestOffchainRPC(t *testing.T) { t.SkipNow() // TODO - if 
utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Core.BABELead = true diff --git a/tests/rpc/rpc_05-state_test.go b/tests/rpc/rpc_05-state_test.go index 943c5b79f5..b35316c822 100644 --- a/tests/rpc/rpc_05-state_test.go +++ b/tests/rpc/rpc_05-state_test.go @@ -12,7 +12,6 @@ import ( "github.com/ChainSafe/gossamer/dot/rpc/modules" "github.com/ChainSafe/gossamer/lib/common" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" "github.com/ChainSafe/gossamer/tests/utils/rpc" @@ -20,11 +19,6 @@ import ( ) func TestStateRPCResponseValidation(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath @@ -168,11 +162,6 @@ func TestStateRPCResponseValidation(t *testing.T) { } func TestStateRPCAPI(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath @@ -374,11 +363,6 @@ func TestStateRPCAPI(t *testing.T) { } func TestRPCStructParamUnmarshal(t *testing.T) { - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetDevGenesisSpecPathTest(t) tomlConfig := config.Default() tomlConfig.Core.BABELead = true diff --git a/tests/rpc/rpc_06-engine_test.go b/tests/rpc/rpc_06-engine_test.go index 53b9c5d2c8..a178f84af0 100644 --- a/tests/rpc/rpc_06-engine_test.go +++ b/tests/rpc/rpc_06-engine_test.go @@ -8,7 +8,6 @@ import ( "testing" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" ) @@ -16,11 +15,6 @@ import ( func TestEngineRPC(t *testing.T) { t.SkipNow() - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath diff --git a/tests/rpc/rpc_07-payment_test.go b/tests/rpc/rpc_07-payment_test.go index f75d0b347c..383aa68dc1 100644 --- a/tests/rpc/rpc_07-payment_test.go +++ b/tests/rpc/rpc_07-payment_test.go @@ -8,7 +8,6 @@ import ( "testing" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" ) @@ -16,11 +15,6 @@ import ( func TestPaymentRPC(t *testing.T) { t.SkipNow() // TODO - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath diff --git a/tests/rpc/rpc_08-contracts_test.go b/tests/rpc/rpc_08-contracts_test.go index 0dd42ac095..43b0747fa3 100644 --- a/tests/rpc/rpc_08-contracts_test.go +++ b/tests/rpc/rpc_08-contracts_test.go @@ -8,7 +8,6 @@ import ( "testing" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" ) @@ -16,11 +15,6 
@@ import ( func TestContractsRPC(t *testing.T) { t.SkipNow() // TODO - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath diff --git a/tests/rpc/rpc_09-babe_test.go b/tests/rpc/rpc_09-babe_test.go index e97185c0ab..3e0f7a947a 100644 --- a/tests/rpc/rpc_09-babe_test.go +++ b/tests/rpc/rpc_09-babe_test.go @@ -8,7 +8,6 @@ import ( "testing" libutils "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/tests/utils" "github.com/ChainSafe/gossamer/tests/utils/config" "github.com/ChainSafe/gossamer/tests/utils/node" ) @@ -16,11 +15,6 @@ import ( func TestBabeRPC(t *testing.T) { t.SkipNow() // TODO - if utils.MODE != rpcSuite { - t.Log("Going to skip RPC suite tests") - return - } - genesisPath := libutils.GetGssmrGenesisRawPathTest(t) tomlConfig := config.Default() tomlConfig.Init.Genesis = genesisPath diff --git a/tests/stress/stress_test.go b/tests/stress/stress_test.go index 9b0fe52abd..d4ad989d3a 100644 --- a/tests/stress/stress_test.go +++ b/tests/stress/stress_test.go @@ -37,6 +37,12 @@ func TestMain(m *testing.M) { return } + err := utils.BuildGossamer() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + logLvl := log.Info if utils.LOGLEVEL != "" { var err error diff --git a/tests/sync/sync_test.go b/tests/sync/sync_test.go index 69880ef743..7c631de613 100644 --- a/tests/sync/sync_test.go +++ b/tests/sync/sync_test.go @@ -48,6 +48,9 @@ func TestCalls(t *testing.T) { t.Skip("MODE != 'sync', skipping stress test") } + err := utils.BuildGossamer() + require.NoError(t, err) + ctx := context.Background() const qtyNodes = 3 diff --git a/tests/utils/build.go b/tests/utils/build.go new file mode 100644 index 0000000000..3fe4fa21b2 --- /dev/null +++ b/tests/utils/build.go @@ -0,0 +1,32 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package utils + +import ( + "context" + "fmt" + "os/exec" + + libutils "github.com/ChainSafe/gossamer/lib/utils" +) + +// BuildGossamer finds the project root path and builds the Gossamer +// binary to ./bin/gossamer at the project root path. 
+func BuildGossamer() (err error) {
+	rootPath, err := libutils.GetProjectRootPath()
+	if err != nil {
+		return fmt.Errorf("get project root path: %w", err)
+	}
+
+	ctx := context.Background()
+	cmd := exec.CommandContext(ctx, "go", "build",
+		"-trimpath", "-o", "./bin/gossamer", "./cmd/gossamer")
+	cmd.Dir = rootPath
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("building Gossamer: %w\n%s", err, output)
+	}
+
+	return nil
+}

From 5dc567e3703691038e2ee186c55177cd427de6d0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 29 Jun 2022 08:45:03 -0400
Subject: [PATCH 22/48] chore(deps): bump github.com/jpillora/ipfilter from 1.2.5 to 1.2.6 (#2583)

---
 go.mod | 4 ++--
 go.sum | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/go.mod b/go.mod
index b84ef78bae..c3d0c66a56 100644
--- a/go.mod
+++ b/go.mod
@@ -26,7 +26,7 @@ require (
 	github.com/holiman/bloomfilter/v2 v2.0.3
 	github.com/ipfs/go-ds-badger2 v0.1.1
 	github.com/ipfs/go-ipns v0.1.2 //indirect
-	github.com/jpillora/ipfilter v1.2.5
+	github.com/jpillora/ipfilter v1.2.6
 	github.com/klauspost/compress v1.15.6
 	github.com/libp2p/go-libp2p v0.15.1
 	github.com/libp2p/go-libp2p-core v0.9.0
@@ -151,7 +151,7 @@ require (
 	github.com/multiformats/go-varint v0.0.6 // indirect
 	github.com/naoina/go-stringutil v0.1.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/phuslu/iploc v1.0.20220429 // indirect
+	github.com/phuslu/iploc v1.0.20220530 // indirect
 	github.com/pierrec/xxHash v0.1.5 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index 5c5b729787..a975634576 100644
--- a/go.sum
+++ b/go.sum
@@ -576,8 +576,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/jpillora/ipfilter v1.2.5 h1:X3xPuZFOja1nKUE2JzOnkHXpBH2fNQ4oy+QfyjHULTg=
-github.com/jpillora/ipfilter v1.2.5/go.mod h1:syZBm/ljGcobmmO6jFKrPdhBSN3NaRkFiavSVW9Uh/Q=
+github.com/jpillora/ipfilter v1.2.6 h1:xV4EM4Y8JlCoc+y6+zBoZWiO+XZFNGuIAS8aLmoPnEg=
+github.com/jpillora/ipfilter v1.2.6/go.mod h1:0GWZUaqbIhpDMdkgKn/O0TATijP2J+qfXsYuUC3Jj8M=
 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -1076,8 +1076,8 @@ github.com/perlin-network/life v0.0.0-20191203030451-05c0e0f7eaea/go.mod h1:3KEU
 github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
 github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-github.com/phuslu/iploc v1.0.20220429 h1:tydyPSke+L5JQtjtBOXvTaKybrFwa6Bseb96eKPrC+0=
-github.com/phuslu/iploc v1.0.20220429/go.mod h1:gsgExGWldwv1AEzZm+Ki9/vGfyjkL33pbSr9HGpt2Xg=
+github.com/phuslu/iploc v1.0.20220530 h1:ZrPxG58BwE+pshaKgE2+DY2Of5Gh7E4JmMto8WaGpDQ=
+github.com/phuslu/iploc v1.0.20220530/go.mod h1:gsgExGWldwv1AEzZm+Ki9/vGfyjkL33pbSr9HGpt2Xg=
 github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pierrec/xxHash v0.1.5 h1:n/jBpwTHiER4xYvK3/CdPVnLDPchj8eTJFFLUb4QHBo=

From 0fcde632f7437a3591af3f39728d0ba260922f06 Mon Sep 17 00:00:00 2001
From: Edward Mack
Date: Wed, 29 Jun 2022 14:54:34 -0400
Subject: [PATCH 23/48] fix(lib/grandpa): Storing Justification Allows Extra Bytes (GSR-13) (#2618)

---
 dot/sync/chain_processor.go | 4 +-
 dot/sync/chain_processor_test.go | 16 +-
 dot/sync/interface.go | 2 +-
 dot/sync/mock_interface_test.go | 7 +-
 dot/sync/syncer_integration_test.go | 5 +-
 lib/grandpa/message_handler.go | 47 +--
 lib/grandpa/message_handler_test.go | 142 +++++++-
 lib/grandpa/mocks_generate_test.go | 6 +
 lib/grandpa/mocks_test.go | 538 ++++++++++++++++++++++++++++
 9 files changed, 722 insertions(+), 45 deletions(-)
 create mode 100644 lib/grandpa/mocks_generate_test.go
 create mode 100644 lib/grandpa/mocks_test.go

diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go
index de02d5bd33..267785021e 100644
--- a/dot/sync/chain_processor.go
+++ b/dot/sync/chain_processor.go
@@ -253,13 +253,13 @@ func (s *chainProcessor) handleJustification(header *types.Header, justification
 		return
 	}
 
-	err := s.finalityGadget.VerifyBlockJustification(header.Hash(), justification)
+	returnedJustification, err := s.finalityGadget.VerifyBlockJustification(header.Hash(), justification)
 	if err != nil {
 		logger.Warnf("failed to verify block number %d and hash %s justification: %s", header.Number, header.Hash(), err)
 		return
 	}
 
-	err = s.blockState.SetJustification(header.Hash(), justification)
+	err = s.blockState.SetJustification(header.Hash(), returnedJustification)
 	if err != nil {
 		logger.Errorf("failed tostore justification: %s", err)
 		return
diff --git a/dot/sync/chain_processor_test.go b/dot/sync/chain_processor_test.go
index 0f9cb6624d..fb9a3a8b21 100644
--- a/dot/sync/chain_processor_test.go
+++ b/dot/sync/chain_processor_test.go
@@ -258,7 +258,8 @@ func Test_chainProcessor_handleJustification(t *testing.T) {
 		"invalid justification": {
 			chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor {
 				mockFinalityGadget := NewMockFinalityGadget(ctrl)
-				mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`x`)).Return(errors.New("error"))
+				mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash,
+					[]byte(`x`)).Return(nil, errors.New("error"))
 				return chainProcessor{
 					finalityGadget: mockFinalityGadget,
 				}
@@ -275,7 +276,7 @@ func Test_chainProcessor_handleJustification(t *testing.T) {
 				mockBlockState := NewMockBlockState(ctrl)
 				mockBlockState.EXPECT().SetJustification(expectedHash, []byte(`xx`)).Return(errors.New("fake error"))
 				mockFinalityGadget := NewMockFinalityGadget(ctrl)
-				mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`xx`)).Return(nil)
+				mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`xx`)).Return([]byte(`xx`), nil)
 				return chainProcessor{
 					blockState: mockBlockState,
 					finalityGadget: mockFinalityGadget,
@@ -293,7 +294,7 @@ func Test_chainProcessor_handleJustification(t *testing.T) {
 				mockBlockState := NewMockBlockState(ctrl)
 				mockBlockState.EXPECT().SetJustification(expectedHash, []byte(`1234`)).Return(nil)
 				mockFinalityGadget := NewMockFinalityGadget(ctrl)
-				mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`1234`)).Return(nil)
+				mockFinalityGadget.EXPECT().VerifyBlockJustification(expectedHash, []byte(`1234`)).Return([]byte(`1234`), nil)
 				return chainProcessor{
 					blockState: mockBlockState,
 					finalityGadget: mockFinalityGadget,
@@ -417,7 +418,7 @@ func Test_chainProcessor_processBlockData(t *testing.T) {
 				mockFinalityGadget := NewMockFinalityGadget(ctrl)
 				mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash(
 					"0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2,
-					3})
+					3}).Return([]byte{1, 2, 3}, nil)
 				mockStorageState := NewMockStorageState(ctrl)
 				mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil)
 				mockBlockImportHandler := NewMockBlockImportHandler(ctrl)
@@ -452,7 +453,7 @@ func Test_chainProcessor_processBlockData(t *testing.T) {
 				mockFinalityGadget := NewMockFinalityGadget(ctrl)
 				mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash(
 					"0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2,
-					3})
+					3}).Return([]byte{1, 2, 3}, nil)
 				mockStorageState := NewMockStorageState(ctrl)
 				mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, mockError)
 				return chainProcessor{
@@ -484,7 +485,7 @@ func Test_chainProcessor_processBlockData(t *testing.T) {
 				mockFinalityGadget := NewMockFinalityGadget(ctrl)
 				mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash(
 					"0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2,
-					3})
+					3}).Return([]byte{1, 2, 3}, nil)
 				mockStorageState := NewMockStorageState(ctrl)
 				mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil)
 				mockBlockImportHandler := NewMockBlockImportHandler(ctrl)
@@ -687,7 +688,8 @@ func Test_chainProcessor_processBlockData(t *testing.T) {
 				mockTelemetry.EXPECT().SendMessage(gomock.Any()).AnyTimes()
 				mockFinalityGadget := NewMockFinalityGadget(ctrl)
 				mockFinalityGadget.EXPECT().VerifyBlockJustification(
-					common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), justification)
+					common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"),
+					justification).Return(justification, nil)
 				return chainProcessor{
 					blockState: mockBlockState,
 					babeVerifier: mockBabeVerifier,
diff --git a/dot/sync/interface.go b/dot/sync/interface.go
index 4922a02b12..231f154f9c 100644
--- a/dot/sync/interface.go
+++ b/dot/sync/interface.go
@@ -76,7 +76,7 @@ type BabeVerifier interface {
 
 // FinalityGadget implements justification verification functionality
 type FinalityGadget interface {
-	VerifyBlockJustification(common.Hash, []byte) error
+	VerifyBlockJustification(common.Hash, []byte) ([]byte, error)
 }
 
 // BlockImportHandler is the interface for the handler of newly imported blocks
diff --git a/dot/sync/mock_interface_test.go b/dot/sync/mock_interface_test.go
index c34f41db3e..5623130556 100644
--- a/dot/sync/mock_interface_test.go
+++ b/dot/sync/mock_interface_test.go
@@ -658,11 +658,12 @@ func (m *MockFinalityGadget) EXPECT() *MockFinalityGadgetMockRecorder {
 }
 
 // VerifyBlockJustification mocks base method.
-func (m *MockFinalityGadget) VerifyBlockJustification(arg0 common.Hash, arg1 []byte) error {
+func (m *MockFinalityGadget) VerifyBlockJustification(arg0 common.Hash, arg1 []byte) ([]byte, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "VerifyBlockJustification", arg0, arg1)
-	ret0, _ := ret[0].(error)
-	return ret0
+	ret0, _ := ret[0].([]byte)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
 }
 
 // VerifyBlockJustification indicates an expected call of VerifyBlockJustification.
diff --git a/dot/sync/syncer_integration_test.go b/dot/sync/syncer_integration_test.go
index fd1f21a012..716ae951a5 100644
--- a/dot/sync/syncer_integration_test.go
+++ b/dot/sync/syncer_integration_test.go
@@ -120,7 +120,10 @@ func newTestSyncer(t *testing.T) *Service {
 	cfg.LogLvl = log.Trace
 	mockFinalityGadget := NewMockFinalityGadget(ctrl)
 	mockFinalityGadget.EXPECT().VerifyBlockJustification(gomock.AssignableToTypeOf(common.Hash{}),
-		gomock.AssignableToTypeOf([]byte{})).AnyTimes()
+		gomock.AssignableToTypeOf([]byte{})).DoAndReturn(func(hash common.Hash, justification []byte) ([]byte, error) {
+		return justification, nil
+	}).AnyTimes()
+
 	cfg.FinalityGadget = mockFinalityGadget
 	cfg.Network = newMockNetwork()
 	cfg.Telemetry = mockTelemetryClient
diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go
index 6a5241694c..766ca26b29 100644
--- a/lib/grandpa/message_handler.go
+++ b/lib/grandpa/message_handler.go
@@ -553,40 +553,41 @@ func (h *MessageHandler) verifyJustification(just *SignedVote, round, setID uint
 	return nil
 }
 
-// VerifyBlockJustification verifies the finality justification for a block
-func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byte) error {
+// VerifyBlockJustification verifies the finality justification for a block, returns scale encoded justification with
+// any extra bytes removed.
+func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byte) ([]byte, error) {
 	fj := Justification{}
 	err := scale.Unmarshal(justification, &fj)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	setID, err := s.grandpaState.GetSetIDByBlockNumber(uint(fj.Commit.Number))
 	if err != nil {
-		return fmt.Errorf("cannot get set ID from block number: %w", err)
+		return nil, fmt.Errorf("cannot get set ID from block number: %w", err)
 	}
 
 	has, err := s.blockState.HasFinalisedBlock(fj.Round, setID)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	if has {
-		return fmt.Errorf("already have finalised block with setID=%d and round=%d", setID, fj.Round)
+		return nil, fmt.Errorf("already have finalised block with setID=%d and round=%d", setID, fj.Round)
 	}
 
 	isDescendant, err := isDescendantOfHighestFinalisedBlock(s.blockState, fj.Commit.Hash)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	if !isDescendant {
-		return errVoteBlockMismatch
+		return nil, errVoteBlockMismatch
 	}
 
 	auths, err := s.grandpaState.GetAuthorities(setID)
 	if err != nil {
-		return fmt.Errorf("cannot get authorities for set ID: %w", err)
+		return nil, fmt.Errorf("cannot get authorities for set ID: %w", err)
 	}
 
 	// threshold is two-thirds the number of authorities,
@@ -594,7 +595,7 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt
 	threshold := (2 * len(auths) / 3)
 
 	if len(fj.Commit.Precommits) < threshold {
-		return ErrMinVotesNotMet
+		return nil, ErrMinVotesNotMet
 	}
 
 	authPubKeys := make([]AuthData, len(fj.Commit.Precommits))
@@ -604,7 +605,7 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt
 
 	equivocatoryVoters, err := getEquivocatoryVoters(authPubKeys)
 	if err != nil {
-		return fmt.Errorf("could not get valid equivocatory voters: %w", err)
+		return nil, fmt.Errorf("could not get valid equivocatory voters: %w", err)
 	}
 
 	var count int
@@ -617,20 +618,20 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt
 		// check if vote was for descendant of committed block
 		isDescendant, err := s.blockState.IsDescendantOf(hash, just.Vote.Hash)
 		if err != nil {
-			return err
+			return nil, err
 		}
 
 		if !isDescendant {
-			return ErrPrecommitBlockMismatch
+			return nil, ErrPrecommitBlockMismatch
 		}
 
 		pk, err := ed25519.NewPublicKey(just.AuthorityID[:])
 		if err != nil {
-			return err
+			return nil, err
 		}
 
 		if !isInAuthSet(pk, auths) {
-			return ErrAuthorityNotInSet
+			return nil, ErrAuthorityNotInSet
 		}
 
 		// verify signature for each precommit
@@ -641,16 +642,16 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt
 			SetID: setID,
 		})
 		if err != nil {
-			return err
+			return nil, err
 		}
 
 		ok, err := pk.Verify(msg, just.Signature[:])
 		if err != nil {
-			return err
+			return nil, err
 		}
 
 		if !ok {
-			return ErrInvalidSignature
+			return nil, ErrInvalidSignature
 		}
 
 		if _, ok := equivocatoryVoters[just.AuthorityID]; ok {
@@ -661,30 +662,30 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt
 	}
 
 	if count+len(equivocatoryVoters) < threshold {
-		return ErrMinVotesNotMet
+		return nil, ErrMinVotesNotMet
 	}
 
 	err = verifyBlockHashAgainstBlockNumber(s.blockState, fj.Commit.Hash, uint(fj.Commit.Number))
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	for _, preCommit := range fj.Commit.Precommits {
 		err := verifyBlockHashAgainstBlockNumber(s.blockState, preCommit.Vote.Hash, uint(preCommit.Vote.Number))
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 
 	err = s.blockState.SetFinalisedHash(hash, fj.Round, setID)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	logger.Debugf(
 		"set finalised block with hash %s, round %d and set id %d",
 		hash, fj.Round, setID)
-	return nil
+	return scale.Marshal(fj)
 }
 
 func verifyBlockHashAgainstBlockNumber(bs BlockState, hash common.Hash, number uint) error {
diff --git a/lib/grandpa/message_handler_test.go b/lib/grandpa/message_handler_test.go
index a03ee6b183..2f43675ed0 100644
--- a/lib/grandpa/message_handler_test.go
+++ b/lib/grandpa/message_handler_test.go
@@ -4,6 +4,7 @@
 package grandpa
 
 import (
+	"errors"
 	"testing"
 	"time"
 
@@ -15,7 +16,7 @@ import (
 	"github.com/ChainSafe/gossamer/lib/keystore"
 	"github.com/ChainSafe/gossamer/pkg/scale"
 	"github.com/golang/mock/gomock"
-
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -653,8 +654,9 @@ func TestMessageHandler_VerifyBlockJustification_WithEquivocatoryVotes(t *testin
 	just := newJustification(round, testHash, number, precommits)
 	data, err := scale.Marshal(*just)
 	require.NoError(t, err)
-	err = gs.VerifyBlockJustification(testHash, data)
+	returnedJust, err := gs.VerifyBlockJustification(testHash, data)
 	require.NoError(t, err)
+	require.Equal(t, data, returnedJust)
 }
 
 func TestMessageHandler_VerifyBlockJustification(t *testing.T) {
@@ -697,8 +699,9 @@ func TestMessageHandler_VerifyBlockJustification(t *testing.T) {
 	just := newJustification(round, testHash, number, precommits)
 	data, err := scale.Marshal(*just)
 	require.NoError(t, err)
-	err = gs.VerifyBlockJustification(testHash, data)
+	returnedJust, err := gs.VerifyBlockJustification(testHash, data)
 	require.NoError(t, err)
+	require.Equal(t, data, returnedJust)
 
 	// use wrong hash, shouldn't verify
 	precommits = buildTestJustification(t, 2, round+1, setID, kr, precommit)
 	just = newJustification(round+1, testHash, number, precommits)
 	just.Commit.Precommits[0].Vote.Hash = genhash
 	data, err = scale.Marshal(*just)
 	require.NoError(t, err)
-	err = gs.VerifyBlockJustification(testHash, data)
+	returnedJust, err = gs.VerifyBlockJustification(testHash, data)
 	require.NotNil(t, err)
 	require.Equal(t, blocktree.ErrEndNodeNotFound, err)
+	require.Nil(t, returnedJust)
 }
 
 func TestMessageHandler_VerifyBlockJustification_invalid(t *testing.T) {
@@ -753,34 +757,38 @@ func TestMessageHandler_VerifyBlockJustification_invalid(t *testing.T) {
 	just.Commit.Precommits[0].Vote.Hash = genhash
 	data, err := scale.Marshal(*just)
 	require.NoError(t, err)
-	err = gs.VerifyBlockJustification(testHash, data)
+	returnedJust, err := gs.VerifyBlockJustification(testHash, data)
 	require.NotNil(t, err)
 	require.Equal(t, ErrPrecommitBlockMismatch, err)
+	require.Nil(t, returnedJust)
 
 	// use wrong round, shouldn't verify
 	precommits = buildTestJustification(t, 2, round+1, setID, kr, precommit)
 	just = newJustification(round+2, testHash, number, precommits)
 	data, err = scale.Marshal(*just)
 	require.NoError(t, err)
-	err = gs.VerifyBlockJustification(testHash, data)
+	returnedJust, err = gs.VerifyBlockJustification(testHash, data)
 	require.NotNil(t, err)
 	require.Equal(t, ErrInvalidSignature, err)
+	require.Nil(t, returnedJust)
 
 	// add authority not in set, shouldn't verify
 	precommits = buildTestJustification(t, len(auths)+1, round+1, setID, kr, precommit)
 	just = newJustification(round+1, testHash, number, precommits)
 	data, err = scale.Marshal(*just)
 	require.NoError(t, err)
-	err = gs.VerifyBlockJustification(testHash, data)
+	returnedJust, err = gs.VerifyBlockJustification(testHash, data)
 	require.Equal(t, ErrAuthorityNotInSet, err)
+	require.Nil(t, returnedJust)
 
 	// not enough signatures, shouldn't verify
precommits = buildTestJustification(t, 1, round+1, setID, kr, precommit) just = newJustification(round+1, testHash, number, precommits) data, err = scale.Marshal(*just) require.NoError(t, err) - err = gs.VerifyBlockJustification(testHash, data) + returnedJust, err = gs.VerifyBlockJustification(testHash, data) require.Equal(t, ErrMinVotesNotMet, err) + require.Nil(t, returnedJust) } func Test_getEquivocatoryVoters(t *testing.T) { @@ -1055,3 +1063,121 @@ func signFakeFullVote( return sig } + +func TestService_VerifyBlockJustification(t *testing.T) { + precommits := buildTestJustification(t, 2, 1, 0, kr, precommit) + justification := newJustification(1, testHash, 1, precommits) + justificationBytes, err := scale.Marshal(*justification) + require.NoError(t, err) + + type fields struct { + blockStateBuilder func(ctrl *gomock.Controller) BlockState + grandpaStateBuilder func(ctrl *gomock.Controller) GrandpaState + } + type args struct { + hash common.Hash + justification []byte + } + tests := map[string]struct { + fields fields + args args + want []byte + wantErr error + }{ + "invalid justification": { + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + return nil + }, + grandpaStateBuilder: func(ctrl *gomock.Controller) GrandpaState { + return nil + }, + }, + args: args{ + hash: common.Hash{}, + justification: []byte{1, 2, 3}, + }, + want: nil, + wantErr: errors.New("EOF, field: 0x0000000000000000000000000000000000000000000000000000000000000000, " + + "field: {Hash:0x0000000000000000000000000000000000000000000000000000000000000000 Number:0 Precommits:[]}"), + }, + "valid justification": { + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasFinalisedBlock(uint64(1), uint64(0)).Return(false, nil) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(testHeader, nil) + mockBlockState.EXPECT().IsDescendantOf(testHash, testHash). + Return(true, nil).Times(3) + mockBlockState.EXPECT().GetHeader(testHash).Return(testHeader, nil).Times(3) + mockBlockState.EXPECT().SetFinalisedHash(testHash, uint64(1), + uint64(0)).Return(nil) + return mockBlockState + }, + grandpaStateBuilder: func(ctrl *gomock.Controller) GrandpaState { + mockGrandpaState := NewMockGrandpaState(ctrl) + mockGrandpaState.EXPECT().GetSetIDByBlockNumber(uint(1)).Return(uint64(0), nil) + mockGrandpaState.EXPECT().GetAuthorities(uint64(0)).Return([]types.GrandpaVoter{ + {Key: *kr.Alice().Public().(*ed25519.PublicKey), ID: 1}, + {Key: *kr.Bob().Public().(*ed25519.PublicKey), ID: 2}, + {Key: *kr.Charlie().Public().(*ed25519.PublicKey), ID: 3}, + }, nil) + return mockGrandpaState + }, + }, + args: args{ + hash: testHash, + justification: justificationBytes, + }, + want: justificationBytes, + }, + "valid justification extra bytes": { + fields: fields{ + blockStateBuilder: func(ctrl *gomock.Controller) BlockState { + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().HasFinalisedBlock(uint64(1), uint64(0)).Return(false, nil) + mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(testHeader, nil) + mockBlockState.EXPECT().IsDescendantOf(testHash, testHash). 
+ Return(true, nil).Times(3) + mockBlockState.EXPECT().GetHeader(testHash).Return(testHeader, nil).Times(3) + mockBlockState.EXPECT().SetFinalisedHash(testHash, uint64(1), + uint64(0)).Return(nil) + return mockBlockState + }, + grandpaStateBuilder: func(ctrl *gomock.Controller) GrandpaState { + mockGrandpaState := NewMockGrandpaState(ctrl) + mockGrandpaState.EXPECT().GetSetIDByBlockNumber(uint(1)).Return(uint64(0), nil) + mockGrandpaState.EXPECT().GetAuthorities(uint64(0)).Return([]types.GrandpaVoter{ + {Key: *kr.Alice().Public().(*ed25519.PublicKey), ID: 1}, + {Key: *kr.Bob().Public().(*ed25519.PublicKey), ID: 2}, + {Key: *kr.Charlie().Public().(*ed25519.PublicKey), ID: 3}, + }, nil) + return mockGrandpaState + }, + }, + args: args{ + hash: testHash, + justification: append(justificationBytes, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}...), + }, + want: justificationBytes, + }, + } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + s := &Service{ + blockState: tt.fields.blockStateBuilder(ctrl), + grandpaState: tt.fields.grandpaStateBuilder(ctrl), + } + got, err := s.VerifyBlockJustification(tt.args.hash, tt.args.justification) + if tt.wantErr != nil { + assert.ErrorContains(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + } + assert.Equalf(t, tt.want, got, "VerifyBlockJustification(%v, %v)", tt.args.hash, tt.args.justification) + }) + } +} diff --git a/lib/grandpa/mocks_generate_test.go b/lib/grandpa/mocks_generate_test.go new file mode 100644 index 0000000000..84d6188cef --- /dev/null +++ b/lib/grandpa/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package grandpa + +//go:generate mockgen -destination=mocks_test.go -package $GOPACKAGE . BlockState,GrandpaState diff --git a/lib/grandpa/mocks_test.go b/lib/grandpa/mocks_test.go new file mode 100644 index 0000000000..a91d5301de --- /dev/null +++ b/lib/grandpa/mocks_test.go @@ -0,0 +1,538 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/lib/grandpa (interfaces: BlockState,GrandpaState) + +// Package grandpa is a generated GoMock package. +package grandpa + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + gomock "github.com/golang/mock/gomock" +) + +// MockBlockState is a mock of BlockState interface. +type MockBlockState struct { + ctrl *gomock.Controller + recorder *MockBlockStateMockRecorder +} + +// MockBlockStateMockRecorder is the mock recorder for MockBlockState. +type MockBlockStateMockRecorder struct { + mock *MockBlockState +} + +// NewMockBlockState creates a new mock instance. +func NewMockBlockState(ctrl *gomock.Controller) *MockBlockState { + mock := &MockBlockState{ctrl: ctrl} + mock.recorder = &MockBlockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlockState) EXPECT() *MockBlockStateMockRecorder { + return m.recorder +} + +// BestBlockHash mocks base method. +func (m *MockBlockState) BestBlockHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// BestBlockHash indicates an expected call of BestBlockHash. 
+func (mr *MockBlockStateMockRecorder) BestBlockHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockHash", reflect.TypeOf((*MockBlockState)(nil).BestBlockHash)) +} + +// BestBlockHeader mocks base method. +func (m *MockBlockState) BestBlockHeader() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockHeader") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BestBlockHeader indicates an expected call of BestBlockHeader. +func (mr *MockBlockStateMockRecorder) BestBlockHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockHeader", reflect.TypeOf((*MockBlockState)(nil).BestBlockHeader)) +} + +// BestBlockNumber mocks base method. +func (m *MockBlockState) BestBlockNumber() (uint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockNumber") + ret0, _ := ret[0].(uint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BestBlockNumber indicates an expected call of BestBlockNumber. +func (mr *MockBlockStateMockRecorder) BestBlockNumber() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockNumber", reflect.TypeOf((*MockBlockState)(nil).BestBlockNumber)) +} + +// BlocktreeAsString mocks base method. +func (m *MockBlockState) BlocktreeAsString() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlocktreeAsString") + ret0, _ := ret[0].(string) + return ret0 +} + +// BlocktreeAsString indicates an expected call of BlocktreeAsString. +func (mr *MockBlockStateMockRecorder) BlocktreeAsString() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlocktreeAsString", reflect.TypeOf((*MockBlockState)(nil).BlocktreeAsString)) +} + +// FreeFinalisedNotifierChannel mocks base method. +func (m *MockBlockState) FreeFinalisedNotifierChannel(arg0 chan *types.FinalisationInfo) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "FreeFinalisedNotifierChannel", arg0) +} + +// FreeFinalisedNotifierChannel indicates an expected call of FreeFinalisedNotifierChannel. +func (mr *MockBlockStateMockRecorder) FreeFinalisedNotifierChannel(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FreeFinalisedNotifierChannel", reflect.TypeOf((*MockBlockState)(nil).FreeFinalisedNotifierChannel), arg0) +} + +// FreeImportedBlockNotifierChannel mocks base method. +func (m *MockBlockState) FreeImportedBlockNotifierChannel(arg0 chan *types.Block) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "FreeImportedBlockNotifierChannel", arg0) +} + +// FreeImportedBlockNotifierChannel indicates an expected call of FreeImportedBlockNotifierChannel. +func (mr *MockBlockStateMockRecorder) FreeImportedBlockNotifierChannel(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FreeImportedBlockNotifierChannel", reflect.TypeOf((*MockBlockState)(nil).FreeImportedBlockNotifierChannel), arg0) +} + +// GenesisHash mocks base method. +func (m *MockBlockState) GenesisHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GenesisHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// GenesisHash indicates an expected call of GenesisHash. 
+func (mr *MockBlockStateMockRecorder) GenesisHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenesisHash", reflect.TypeOf((*MockBlockState)(nil).GenesisHash)) +} + +// GetFinalisedHeader mocks base method. +func (m *MockBlockState) GetFinalisedHeader(arg0, arg1 uint64) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFinalisedHeader", arg0, arg1) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFinalisedHeader indicates an expected call of GetFinalisedHeader. +func (mr *MockBlockStateMockRecorder) GetFinalisedHeader(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalisedHeader", reflect.TypeOf((*MockBlockState)(nil).GetFinalisedHeader), arg0, arg1) +} + +// GetFinalisedNotifierChannel mocks base method. +func (m *MockBlockState) GetFinalisedNotifierChannel() chan *types.FinalisationInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFinalisedNotifierChannel") + ret0, _ := ret[0].(chan *types.FinalisationInfo) + return ret0 +} + +// GetFinalisedNotifierChannel indicates an expected call of GetFinalisedNotifierChannel. +func (mr *MockBlockStateMockRecorder) GetFinalisedNotifierChannel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalisedNotifierChannel", reflect.TypeOf((*MockBlockState)(nil).GetFinalisedNotifierChannel)) +} + +// GetHashByNumber mocks base method. +func (m *MockBlockState) GetHashByNumber(arg0 uint) (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHashByNumber", arg0) + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHashByNumber indicates an expected call of GetHashByNumber. +func (mr *MockBlockStateMockRecorder) GetHashByNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHashByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHashByNumber), arg0) +} + +// GetHeader mocks base method. +func (m *MockBlockState) GetHeader(arg0 common.Hash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader", arg0) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockBlockStateMockRecorder) GetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockBlockState)(nil).GetHeader), arg0) +} + +// GetHeaderByNumber mocks base method. +func (m *MockBlockState) GetHeaderByNumber(arg0 uint) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeaderByNumber", arg0) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeaderByNumber indicates an expected call of GetHeaderByNumber. +func (mr *MockBlockStateMockRecorder) GetHeaderByNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeaderByNumber", reflect.TypeOf((*MockBlockState)(nil).GetHeaderByNumber), arg0) +} + +// GetHighestFinalisedHeader mocks base method. 
+func (m *MockBlockState) GetHighestFinalisedHeader() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestFinalisedHeader") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestFinalisedHeader indicates an expected call of GetHighestFinalisedHeader. +func (mr *MockBlockStateMockRecorder) GetHighestFinalisedHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestFinalisedHeader", reflect.TypeOf((*MockBlockState)(nil).GetHighestFinalisedHeader)) +} + +// GetHighestRoundAndSetID mocks base method. +func (m *MockBlockState) GetHighestRoundAndSetID() (uint64, uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestRoundAndSetID") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(uint64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetHighestRoundAndSetID indicates an expected call of GetHighestRoundAndSetID. +func (mr *MockBlockStateMockRecorder) GetHighestRoundAndSetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestRoundAndSetID", reflect.TypeOf((*MockBlockState)(nil).GetHighestRoundAndSetID)) +} + +// GetImportedBlockNotifierChannel mocks base method. +func (m *MockBlockState) GetImportedBlockNotifierChannel() chan *types.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetImportedBlockNotifierChannel") + ret0, _ := ret[0].(chan *types.Block) + return ret0 +} + +// GetImportedBlockNotifierChannel indicates an expected call of GetImportedBlockNotifierChannel. +func (mr *MockBlockStateMockRecorder) GetImportedBlockNotifierChannel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImportedBlockNotifierChannel", reflect.TypeOf((*MockBlockState)(nil).GetImportedBlockNotifierChannel)) +} + +// GetJustification mocks base method. +func (m *MockBlockState) GetJustification(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetJustification", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetJustification indicates an expected call of GetJustification. +func (mr *MockBlockStateMockRecorder) GetJustification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetJustification", reflect.TypeOf((*MockBlockState)(nil).GetJustification), arg0) +} + +// HasFinalisedBlock mocks base method. +func (m *MockBlockState) HasFinalisedBlock(arg0, arg1 uint64) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasFinalisedBlock", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasFinalisedBlock indicates an expected call of HasFinalisedBlock. +func (mr *MockBlockStateMockRecorder) HasFinalisedBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasFinalisedBlock", reflect.TypeOf((*MockBlockState)(nil).HasFinalisedBlock), arg0, arg1) +} + +// HasHeader mocks base method. +func (m *MockBlockState) HasHeader(arg0 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasHeader", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasHeader indicates an expected call of HasHeader. 
+func (mr *MockBlockStateMockRecorder) HasHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasHeader", reflect.TypeOf((*MockBlockState)(nil).HasHeader), arg0) +} + +// HasJustification mocks base method. +func (m *MockBlockState) HasJustification(arg0 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasJustification", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasJustification indicates an expected call of HasJustification. +func (mr *MockBlockStateMockRecorder) HasJustification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasJustification", reflect.TypeOf((*MockBlockState)(nil).HasJustification), arg0) +} + +// HighestCommonAncestor mocks base method. +func (m *MockBlockState) HighestCommonAncestor(arg0, arg1 common.Hash) (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HighestCommonAncestor", arg0, arg1) + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HighestCommonAncestor indicates an expected call of HighestCommonAncestor. +func (mr *MockBlockStateMockRecorder) HighestCommonAncestor(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HighestCommonAncestor", reflect.TypeOf((*MockBlockState)(nil).HighestCommonAncestor), arg0, arg1) +} + +// IsDescendantOf mocks base method. +func (m *MockBlockState) IsDescendantOf(arg0, arg1 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDescendantOf", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsDescendantOf indicates an expected call of IsDescendantOf. +func (mr *MockBlockStateMockRecorder) IsDescendantOf(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDescendantOf", reflect.TypeOf((*MockBlockState)(nil).IsDescendantOf), arg0, arg1) +} + +// Leaves mocks base method. +func (m *MockBlockState) Leaves() []common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Leaves") + ret0, _ := ret[0].([]common.Hash) + return ret0 +} + +// Leaves indicates an expected call of Leaves. +func (mr *MockBlockStateMockRecorder) Leaves() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Leaves", reflect.TypeOf((*MockBlockState)(nil).Leaves)) +} + +// SetFinalisedHash mocks base method. +func (m *MockBlockState) SetFinalisedHash(arg0 common.Hash, arg1, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetFinalisedHash", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetFinalisedHash indicates an expected call of SetFinalisedHash. +func (mr *MockBlockStateMockRecorder) SetFinalisedHash(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalisedHash", reflect.TypeOf((*MockBlockState)(nil).SetFinalisedHash), arg0, arg1, arg2) +} + +// SetJustification mocks base method. +func (m *MockBlockState) SetJustification(arg0 common.Hash, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetJustification", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetJustification indicates an expected call of SetJustification. 
+func (mr *MockBlockStateMockRecorder) SetJustification(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetJustification", reflect.TypeOf((*MockBlockState)(nil).SetJustification), arg0, arg1) +} + +// MockGrandpaState is a mock of GrandpaState interface. +type MockGrandpaState struct { + ctrl *gomock.Controller + recorder *MockGrandpaStateMockRecorder +} + +// MockGrandpaStateMockRecorder is the mock recorder for MockGrandpaState. +type MockGrandpaStateMockRecorder struct { + mock *MockGrandpaState +} + +// NewMockGrandpaState creates a new mock instance. +func NewMockGrandpaState(ctrl *gomock.Controller) *MockGrandpaState { + mock := &MockGrandpaState{ctrl: ctrl} + mock.recorder = &MockGrandpaStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGrandpaState) EXPECT() *MockGrandpaStateMockRecorder { + return m.recorder +} + +// GetAuthorities mocks base method. +func (m *MockGrandpaState) GetAuthorities(arg0 uint64) ([]types.GrandpaVoter, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAuthorities", arg0) + ret0, _ := ret[0].([]types.GrandpaVoter) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAuthorities indicates an expected call of GetAuthorities. +func (mr *MockGrandpaStateMockRecorder) GetAuthorities(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAuthorities", reflect.TypeOf((*MockGrandpaState)(nil).GetAuthorities), arg0) +} + +// GetCurrentSetID mocks base method. +func (m *MockGrandpaState) GetCurrentSetID() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSetID") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentSetID indicates an expected call of GetCurrentSetID. +func (mr *MockGrandpaStateMockRecorder) GetCurrentSetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSetID", reflect.TypeOf((*MockGrandpaState)(nil).GetCurrentSetID)) +} + +// GetLatestRound mocks base method. +func (m *MockGrandpaState) GetLatestRound() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestRound") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLatestRound indicates an expected call of GetLatestRound. +func (mr *MockGrandpaStateMockRecorder) GetLatestRound() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestRound", reflect.TypeOf((*MockGrandpaState)(nil).GetLatestRound)) +} + +// GetPrecommits mocks base method. +func (m *MockGrandpaState) GetPrecommits(arg0, arg1 uint64) ([]types.GrandpaSignedVote, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPrecommits", arg0, arg1) + ret0, _ := ret[0].([]types.GrandpaSignedVote) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPrecommits indicates an expected call of GetPrecommits. +func (mr *MockGrandpaStateMockRecorder) GetPrecommits(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrecommits", reflect.TypeOf((*MockGrandpaState)(nil).GetPrecommits), arg0, arg1) +} + +// GetPrevotes mocks base method. 
+func (m *MockGrandpaState) GetPrevotes(arg0, arg1 uint64) ([]types.GrandpaSignedVote, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPrevotes", arg0, arg1) + ret0, _ := ret[0].([]types.GrandpaSignedVote) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPrevotes indicates an expected call of GetPrevotes. +func (mr *MockGrandpaStateMockRecorder) GetPrevotes(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPrevotes", reflect.TypeOf((*MockGrandpaState)(nil).GetPrevotes), arg0, arg1) +} + +// GetSetIDByBlockNumber mocks base method. +func (m *MockGrandpaState) GetSetIDByBlockNumber(arg0 uint) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSetIDByBlockNumber", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSetIDByBlockNumber indicates an expected call of GetSetIDByBlockNumber. +func (mr *MockGrandpaStateMockRecorder) GetSetIDByBlockNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSetIDByBlockNumber", reflect.TypeOf((*MockGrandpaState)(nil).GetSetIDByBlockNumber), arg0) +} + +// SetLatestRound mocks base method. +func (m *MockGrandpaState) SetLatestRound(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetLatestRound", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetLatestRound indicates an expected call of SetLatestRound. +func (mr *MockGrandpaStateMockRecorder) SetLatestRound(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestRound", reflect.TypeOf((*MockGrandpaState)(nil).SetLatestRound), arg0) +} + +// SetPrecommits mocks base method. +func (m *MockGrandpaState) SetPrecommits(arg0, arg1 uint64, arg2 []types.GrandpaSignedVote) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPrecommits", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetPrecommits indicates an expected call of SetPrecommits. +func (mr *MockGrandpaStateMockRecorder) SetPrecommits(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPrecommits", reflect.TypeOf((*MockGrandpaState)(nil).SetPrecommits), arg0, arg1, arg2) +} + +// SetPrevotes mocks base method. +func (m *MockGrandpaState) SetPrevotes(arg0, arg1 uint64, arg2 []types.GrandpaSignedVote) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPrevotes", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetPrevotes indicates an expected call of SetPrevotes. 
+func (mr *MockGrandpaStateMockRecorder) SetPrevotes(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPrevotes", reflect.TypeOf((*MockGrandpaState)(nil).SetPrevotes), arg0, arg1, arg2)
+}

From e7749cffd67f868329f3d0d9eae950198ec9145d Mon Sep 17 00:00:00 2001
From: Edward Mack
Date: Wed, 29 Jun 2022 14:56:09 -0400
Subject: [PATCH 24/48] fix(dot/rpc/modules): grandpa.proveFinality update
 parameters, fix bug (#2576)

---
 dot/rpc/modules/api_mocks.go                 |    4 +-
 dot/rpc/modules/grandpa.go                   |   41 +-
 dot/rpc/modules/grandpa_integration_test.go  |   12 +-
 dot/rpc/modules/grandpa_test.go              |  159 +-
 dot/rpc/modules/mocks_generate_test.go       |    6 +
 dot/rpc/modules/mocks_test.go                |  267 ++
 dot/rpc/subscription/websocket_test.go       |    6 +-
 dot/rpc/websocket_test.go                    |    2 +-
 tests/polkadotjs_test/package-lock.json      | 3011 ++++++++++++++---
 tests/polkadotjs_test/package.json           |    2 +-
 .../polkadotjs_test/start_polkadotjs_test.go |    1 +
 tests/polkadotjs_test/test/test-polkadot.js  |   15 +-
 12 files changed, 2970 insertions(+), 556 deletions(-)
 create mode 100644 dot/rpc/modules/mocks_generate_test.go
 create mode 100644 dot/rpc/modules/mocks_test.go

diff --git a/dot/rpc/modules/api_mocks.go b/dot/rpc/modules/api_mocks.go
index 778caae53c..e46ce9e5a7 100644
--- a/dot/rpc/modules/api_mocks.go
+++ b/dot/rpc/modules/api_mocks.go
@@ -27,8 +27,8 @@ func NewMockStorageAPI() *modulesmocks.StorageAPI {
 	return m
 }
 
-// NewMockBlockAPI creates and return an rpc BlockAPI interface mock
-func NewMockBlockAPI() *modulesmocks.BlockAPI {
+// NewMockeryBlockAPI creates and returns an RPC BlockAPI interface mock
+func NewMockeryBlockAPI() *modulesmocks.BlockAPI {
 	m := new(modulesmocks.BlockAPI)
 	m.On("GetHeader", mock.AnythingOfType("common.Hash")).Return(nil, nil)
 	m.On("BestBlockHash").Return(common.Hash{})
diff --git a/dot/rpc/modules/grandpa.go b/dot/rpc/modules/grandpa.go
index 444c249a9d..f6773c87f5 100644
--- a/dot/rpc/modules/grandpa.go
+++ b/dot/rpc/modules/grandpa.go
@@ -4,6 +4,7 @@ package modules
 
 import (
+	"fmt"
 	"net/http"
 
 	"github.com/ChainSafe/gossamer/lib/common"
@@ -48,39 +49,35 @@ type RoundStateResponse struct {
 
 // ProveFinalityRequest request struct
 type ProveFinalityRequest struct {
-	blockHashStart common.Hash
-	blockHashEnd   common.Hash
-	authorityID    uint64
+	BlockNumber uint32 `json:"blockNumber"`
 }
 
 // ProveFinalityResponse is an optional SCALE encoded proof array
-type ProveFinalityResponse [][]byte
+type ProveFinalityResponse []string
 
-// ProveFinality for the provided block range. Returns NULL if there are no known finalised blocks in the range.
-// If no authorities set is provided, the current one will be attempted.
+// ProveFinality writes the SCALE encoded justification for the block at the provided number, as a
+// hex string, to the response. If the block has no justification, a message indicating this is
+// written to the response instead.
+// Any error encountered while fetching the justification is returned to the caller.
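+//
+// A minimal usage sketch (illustrative only; it mirrors the call made in
+// grandpa_integration_test.go further down in this patch):
+//
+//	var res ProveFinalityResponse
+//	err := gm.ProveFinality(nil, &ProveFinalityRequest{BlockNumber: 42}, &res)
+//	// On success, res holds the hex-encoded justification, e.g. []string{"0x..."}.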
func (gm *GrandpaModule) ProveFinality(r *http.Request, req *ProveFinalityRequest, res *ProveFinalityResponse) error { - blocksToCheck, err := gm.blockAPI.SubChain(req.blockHashStart, req.blockHashEnd) + blockHash, err := gm.blockAPI.GetHashByNumber(uint(req.BlockNumber)) if err != nil { return err } - - // Leaving check in for linter - if req.authorityID != uint64(0) { - // TODO: Check if functionality relevant (#1404) + hasJustification, err := gm.blockAPI.HasJustification(blockHash) + if err != nil { + return fmt.Errorf("checking for justification: %w", err) } - for _, block := range blocksToCheck { - hasJustification, _ := gm.blockAPI.HasJustification(block) - if !hasJustification { - continue - } - - justification, err := gm.blockAPI.GetJustification(block) - if err != nil { - continue - } - *res = append(*res, justification) + if !hasJustification { + *res = append(*res, "GRANDPA prove finality rpc failed: Block not covered by authority set changes") + return nil + } + justification, err := gm.blockAPI.GetJustification(blockHash) + if err != nil { + return fmt.Errorf("getting justification: %w", err) } + *res = append(*res, common.BytesToHex(justification)) return nil } diff --git a/dot/rpc/modules/grandpa_integration_test.go b/dot/rpc/modules/grandpa_integration_test.go index c4189b68a4..a7e328cc48 100644 --- a/dot/rpc/modules/grandpa_integration_test.go +++ b/dot/rpc/modules/grandpa_integration_test.go @@ -6,7 +6,6 @@ package modules import ( - "reflect" "testing" "github.com/ChainSafe/gossamer/dot/state" @@ -14,6 +13,7 @@ import ( "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/grandpa" "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" rpcmocks "github.com/ChainSafe/gossamer/dot/rpc/modules/mocks" @@ -36,22 +36,18 @@ func TestGrandpaProveFinality(t *testing.T) { testStateService.Block.SetJustification(bestBlock.Header.ParentHash, make([]byte, 10)) testStateService.Block.SetJustification(bestBlock.Header.Hash(), make([]byte, 11)) - var expectedResponse ProveFinalityResponse - expectedResponse = append(expectedResponse, make([]byte, 10), make([]byte, 11)) + expectedResponse := &ProveFinalityResponse{"0x0000000000000000000000"} res := new(ProveFinalityResponse) err = gmSvc.ProveFinality(nil, &ProveFinalityRequest{ - blockHashStart: bestBlock.Header.ParentHash, - blockHashEnd: bestBlock.Header.Hash(), + BlockNumber: uint32(bestBlock.Header.Number), }, res) if err != nil { t.Fatal(err) } - if !reflect.DeepEqual(*res, expectedResponse) { - t.Errorf("Fail: expected: %+v got: %+v\n", res, &expectedResponse) - } + assert.Equal(t, *expectedResponse, *res) } func TestRoundState(t *testing.T) { diff --git a/dot/rpc/modules/grandpa_test.go b/dot/rpc/modules/grandpa_test.go index 569b1278ba..650dfd0a26 100644 --- a/dot/rpc/modules/grandpa_test.go +++ b/dot/rpc/modules/grandpa_test.go @@ -14,126 +14,99 @@ import ( "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/grandpa" "github.com/ChainSafe/gossamer/lib/keystore" - + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) func TestGrandpaModule_ProveFinality(t *testing.T) { - testHash := common.NewHash([]byte{0x01, 0x02}) - testHashSlice := []common.Hash{testHash, testHash, testHash} - - mockBlockFinalityAPI := new(mocks.BlockFinalityAPI) - mockBlockAPI := new(mocks.BlockAPI) - mockBlockAPI.On("SubChain", testHash, testHash).Return(testHashSlice, nil) - 
mockBlockAPI.On("HasJustification", testHash).Return(true, nil) - mockBlockAPI.On("GetJustification", testHash).Return([]byte("test"), nil) + t.Parallel() - mockBlockAPIHasJustErr := new(mocks.BlockAPI) - mockBlockAPIHasJustErr.On("SubChain", testHash, testHash).Return(testHashSlice, nil) - mockBlockAPIHasJustErr.On("HasJustification", testHash).Return(false, nil) + mockError := errors.New("test mock error") - mockBlockAPIGetJustErr := new(mocks.BlockAPI) - mockBlockAPIGetJustErr.On("SubChain", testHash, testHash).Return(testHashSlice, nil) - mockBlockAPIGetJustErr.On("HasJustification", testHash).Return(true, nil) - mockBlockAPIGetJustErr.On("GetJustification", testHash).Return(nil, errors.New("GetJustification error")) - - mockBlockAPISubChainErr := new(mocks.BlockAPI) - mockBlockAPISubChainErr.On("SubChain", testHash, testHash).Return(nil, errors.New("SubChain error")) - - grandpaModule := NewGrandpaModule(mockBlockAPISubChainErr, mockBlockFinalityAPI) - type fields struct { - blockAPI BlockAPI - blockFinalityAPI BlockFinalityAPI - } - type args struct { - r *http.Request - req *ProveFinalityRequest - } - tests := []struct { - name string - fields fields - args args - expErr error - exp ProveFinalityResponse + tests := map[string]struct { + blockAPIBuilder func(ctrl *gomock.Controller) BlockAPI + request *ProveFinalityRequest + expErr error + exp ProveFinalityResponse }{ - { - name: "SubChain Err", - fields: fields{ - grandpaModule.blockAPI, - grandpaModule.blockFinalityAPI, + "error during get hash by number": { + blockAPIBuilder: func(ctrl *gomock.Controller) BlockAPI { + mockBlockAPI := NewMockBlockAPI(ctrl) + mockBlockAPI.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, mockError) + return mockBlockAPI }, - args: args{ - req: &ProveFinalityRequest{ - blockHashStart: testHash, - blockHashEnd: testHash, - authorityID: uint64(21), - }, + request: &ProveFinalityRequest{ + BlockNumber: 1, }, - expErr: errors.New("SubChain error"), + expErr: mockError, }, - { - name: "OK Case", - fields: fields{ - mockBlockAPI, - mockBlockFinalityAPI, + "error during has justification": { + blockAPIBuilder: func(ctrl *gomock.Controller) BlockAPI { + mockBlockAPI := NewMockBlockAPI(ctrl) + mockBlockAPI.EXPECT().GetHashByNumber(uint(2)).Return(common.Hash{2}, nil) + mockBlockAPI.EXPECT().HasJustification(common.Hash{2}).Return(false, mockError) + return mockBlockAPI }, - args: args{ - req: &ProveFinalityRequest{ - blockHashStart: testHash, - blockHashEnd: testHash, - authorityID: uint64(21), - }, + request: &ProveFinalityRequest{ + BlockNumber: 2, }, - exp: ProveFinalityResponse{ - []uint8{0x74, 0x65, 0x73, 0x74}, - []uint8{0x74, 0x65, 0x73, 0x74}, - []uint8{0x74, 0x65, 0x73, 0x74}}, + expErr: mockError, }, - { - name: "HasJustification Error", - fields: fields{ - mockBlockAPIHasJustErr, - mockBlockFinalityAPI, + "has justification is false": { + blockAPIBuilder: func(ctrl *gomock.Controller) BlockAPI { + mockBlockAPI := NewMockBlockAPI(ctrl) + mockBlockAPI.EXPECT().GetHashByNumber(uint(2)).Return(common.Hash{2}, nil) + mockBlockAPI.EXPECT().HasJustification(common.Hash{2}).Return(false, nil) + return mockBlockAPI }, - args: args{ - req: &ProveFinalityRequest{ - blockHashStart: testHash, - blockHashEnd: testHash, - authorityID: uint64(21), - }, + request: &ProveFinalityRequest{ + BlockNumber: 2, }, - exp: ProveFinalityResponse(nil), + exp: ProveFinalityResponse{"GRANDPA prove finality rpc failed: Block not covered by authority set changes"}, }, - { - name: "GetJustification Error", - fields: fields{ 
- mockBlockAPIGetJustErr, - mockBlockFinalityAPI, + "error during getJustification": { + blockAPIBuilder: func(ctrl *gomock.Controller) BlockAPI { + mockBlockAPI := NewMockBlockAPI(ctrl) + mockBlockAPI.EXPECT().GetHashByNumber(uint(3)).Return(common.Hash{3}, nil) + mockBlockAPI.EXPECT().HasJustification(common.Hash{3}).Return(true, nil) + mockBlockAPI.EXPECT().GetJustification(common.Hash{3}).Return(nil, mockError) + return mockBlockAPI }, - args: args{ - req: &ProveFinalityRequest{ - blockHashStart: testHash, - blockHashEnd: testHash, - authorityID: uint64(21), - }, + request: &ProveFinalityRequest{ + BlockNumber: 3, + }, + expErr: mockError, + }, + "happy path": { + blockAPIBuilder: func(ctrl *gomock.Controller) BlockAPI { + mockBlockAPI := NewMockBlockAPI(ctrl) + mockBlockAPI.EXPECT().GetHashByNumber(uint(4)).Return(common.Hash{4}, nil) + mockBlockAPI.EXPECT().HasJustification(common.Hash{4}).Return(true, nil) + mockBlockAPI.EXPECT().GetJustification(common.Hash{4}).Return([]byte(`justification`), nil) + return mockBlockAPI + }, + request: &ProveFinalityRequest{ + BlockNumber: 4, }, - exp: ProveFinalityResponse(nil), + exp: ProveFinalityResponse{common.BytesToHex([]byte(`justification`))}, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) gm := &GrandpaModule{ - blockAPI: tt.fields.blockAPI, - blockFinalityAPI: tt.fields.blockFinalityAPI, + blockAPI: tt.blockAPIBuilder(ctrl), } res := ProveFinalityResponse(nil) - err := gm.ProveFinality(tt.args.r, tt.args.req, &res) + err := gm.ProveFinality(nil, tt.request, &res) + assert.Equal(t, tt.exp, res) if tt.expErr != nil { - assert.EqualError(t, err, tt.expErr.Error()) + assert.ErrorContains(t, err, tt.expErr.Error()) } else { assert.NoError(t, err) } - assert.Equal(t, tt.exp, res) }) } } diff --git a/dot/rpc/modules/mocks_generate_test.go b/dot/rpc/modules/mocks_generate_test.go new file mode 100644 index 0000000000..33dac5a419 --- /dev/null +++ b/dot/rpc/modules/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package modules + +//go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . BlockAPI diff --git a/dot/rpc/modules/mocks_test.go b/dot/rpc/modules/mocks_test.go new file mode 100644 index 0000000000..6478d6cdf5 --- /dev/null +++ b/dot/rpc/modules/mocks_test.go @@ -0,0 +1,267 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/rpc/modules (interfaces: BlockAPI) + +// Package modules is a generated GoMock package. +package modules + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + gomock "github.com/golang/mock/gomock" +) + +// MockBlockAPI is a mock of BlockAPI interface. +type MockBlockAPI struct { + ctrl *gomock.Controller + recorder *MockBlockAPIMockRecorder +} + +// MockBlockAPIMockRecorder is the mock recorder for MockBlockAPI. +type MockBlockAPIMockRecorder struct { + mock *MockBlockAPI +} + +// NewMockBlockAPI creates a new mock instance. +func NewMockBlockAPI(ctrl *gomock.Controller) *MockBlockAPI { + mock := &MockBlockAPI{ctrl: ctrl} + mock.recorder = &MockBlockAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockBlockAPI) EXPECT() *MockBlockAPIMockRecorder { + return m.recorder +} + +// BestBlockHash mocks base method. +func (m *MockBlockAPI) BestBlockHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BestBlockHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// BestBlockHash indicates an expected call of BestBlockHash. +func (mr *MockBlockAPIMockRecorder) BestBlockHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BestBlockHash", reflect.TypeOf((*MockBlockAPI)(nil).BestBlockHash)) +} + +// FreeFinalisedNotifierChannel mocks base method. +func (m *MockBlockAPI) FreeFinalisedNotifierChannel(arg0 chan *types.FinalisationInfo) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "FreeFinalisedNotifierChannel", arg0) +} + +// FreeFinalisedNotifierChannel indicates an expected call of FreeFinalisedNotifierChannel. +func (mr *MockBlockAPIMockRecorder) FreeFinalisedNotifierChannel(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FreeFinalisedNotifierChannel", reflect.TypeOf((*MockBlockAPI)(nil).FreeFinalisedNotifierChannel), arg0) +} + +// FreeImportedBlockNotifierChannel mocks base method. +func (m *MockBlockAPI) FreeImportedBlockNotifierChannel(arg0 chan *types.Block) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "FreeImportedBlockNotifierChannel", arg0) +} + +// FreeImportedBlockNotifierChannel indicates an expected call of FreeImportedBlockNotifierChannel. +func (mr *MockBlockAPIMockRecorder) FreeImportedBlockNotifierChannel(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FreeImportedBlockNotifierChannel", reflect.TypeOf((*MockBlockAPI)(nil).FreeImportedBlockNotifierChannel), arg0) +} + +// GetBlockByHash mocks base method. +func (m *MockBlockAPI) GetBlockByHash(arg0 common.Hash) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByHash", arg0) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByHash indicates an expected call of GetBlockByHash. +func (mr *MockBlockAPIMockRecorder) GetBlockByHash(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHash", reflect.TypeOf((*MockBlockAPI)(nil).GetBlockByHash), arg0) +} + +// GetFinalisedHash mocks base method. +func (m *MockBlockAPI) GetFinalisedHash(arg0, arg1 uint64) (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFinalisedHash", arg0, arg1) + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFinalisedHash indicates an expected call of GetFinalisedHash. +func (mr *MockBlockAPIMockRecorder) GetFinalisedHash(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalisedHash", reflect.TypeOf((*MockBlockAPI)(nil).GetFinalisedHash), arg0, arg1) +} + +// GetFinalisedNotifierChannel mocks base method. +func (m *MockBlockAPI) GetFinalisedNotifierChannel() chan *types.FinalisationInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFinalisedNotifierChannel") + ret0, _ := ret[0].(chan *types.FinalisationInfo) + return ret0 +} + +// GetFinalisedNotifierChannel indicates an expected call of GetFinalisedNotifierChannel. 
+func (mr *MockBlockAPIMockRecorder) GetFinalisedNotifierChannel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFinalisedNotifierChannel", reflect.TypeOf((*MockBlockAPI)(nil).GetFinalisedNotifierChannel)) +} + +// GetHashByNumber mocks base method. +func (m *MockBlockAPI) GetHashByNumber(arg0 uint) (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHashByNumber", arg0) + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHashByNumber indicates an expected call of GetHashByNumber. +func (mr *MockBlockAPIMockRecorder) GetHashByNumber(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHashByNumber", reflect.TypeOf((*MockBlockAPI)(nil).GetHashByNumber), arg0) +} + +// GetHeader mocks base method. +func (m *MockBlockAPI) GetHeader(arg0 common.Hash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader", arg0) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockBlockAPIMockRecorder) GetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockBlockAPI)(nil).GetHeader), arg0) +} + +// GetHighestFinalisedHash mocks base method. +func (m *MockBlockAPI) GetHighestFinalisedHash() (common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestFinalisedHash") + ret0, _ := ret[0].(common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestFinalisedHash indicates an expected call of GetHighestFinalisedHash. +func (mr *MockBlockAPIMockRecorder) GetHighestFinalisedHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestFinalisedHash", reflect.TypeOf((*MockBlockAPI)(nil).GetHighestFinalisedHash)) +} + +// GetImportedBlockNotifierChannel mocks base method. +func (m *MockBlockAPI) GetImportedBlockNotifierChannel() chan *types.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetImportedBlockNotifierChannel") + ret0, _ := ret[0].(chan *types.Block) + return ret0 +} + +// GetImportedBlockNotifierChannel indicates an expected call of GetImportedBlockNotifierChannel. +func (mr *MockBlockAPIMockRecorder) GetImportedBlockNotifierChannel() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImportedBlockNotifierChannel", reflect.TypeOf((*MockBlockAPI)(nil).GetImportedBlockNotifierChannel)) +} + +// GetJustification mocks base method. +func (m *MockBlockAPI) GetJustification(arg0 common.Hash) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetJustification", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetJustification indicates an expected call of GetJustification. +func (mr *MockBlockAPIMockRecorder) GetJustification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetJustification", reflect.TypeOf((*MockBlockAPI)(nil).GetJustification), arg0) +} + +// GetRuntime mocks base method. 
+func (m *MockBlockAPI) GetRuntime(arg0 *common.Hash) (runtime.Instance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRuntime", arg0) + ret0, _ := ret[0].(runtime.Instance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRuntime indicates an expected call of GetRuntime. +func (mr *MockBlockAPIMockRecorder) GetRuntime(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRuntime", reflect.TypeOf((*MockBlockAPI)(nil).GetRuntime), arg0) +} + +// HasJustification mocks base method. +func (m *MockBlockAPI) HasJustification(arg0 common.Hash) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasJustification", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasJustification indicates an expected call of HasJustification. +func (mr *MockBlockAPIMockRecorder) HasJustification(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasJustification", reflect.TypeOf((*MockBlockAPI)(nil).HasJustification), arg0) +} + +// RegisterRuntimeUpdatedChannel mocks base method. +func (m *MockBlockAPI) RegisterRuntimeUpdatedChannel(arg0 chan<- runtime.Version) (uint32, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterRuntimeUpdatedChannel", arg0) + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterRuntimeUpdatedChannel indicates an expected call of RegisterRuntimeUpdatedChannel. +func (mr *MockBlockAPIMockRecorder) RegisterRuntimeUpdatedChannel(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRuntimeUpdatedChannel", reflect.TypeOf((*MockBlockAPI)(nil).RegisterRuntimeUpdatedChannel), arg0) +} + +// SubChain mocks base method. +func (m *MockBlockAPI) SubChain(arg0, arg1 common.Hash) ([]common.Hash, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubChain", arg0, arg1) + ret0, _ := ret[0].([]common.Hash) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubChain indicates an expected call of SubChain. +func (mr *MockBlockAPIMockRecorder) SubChain(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubChain", reflect.TypeOf((*MockBlockAPI)(nil).SubChain), arg0, arg1) +} + +// UnregisterRuntimeUpdatedChannel mocks base method. +func (m *MockBlockAPI) UnregisterRuntimeUpdatedChannel(arg0 uint32) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnregisterRuntimeUpdatedChannel", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// UnregisterRuntimeUpdatedChannel indicates an expected call of UnregisterRuntimeUpdatedChannel. 
+func (mr *MockBlockAPIMockRecorder) UnregisterRuntimeUpdatedChannel(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnregisterRuntimeUpdatedChannel", reflect.TypeOf((*MockBlockAPI)(nil).UnregisterRuntimeUpdatedChannel), arg0) +} diff --git a/dot/rpc/subscription/websocket_test.go b/dot/rpc/subscription/websocket_test.go index 33dd91d612..03d06bce69 100644 --- a/dot/rpc/subscription/websocket_test.go +++ b/dot/rpc/subscription/websocket_test.go @@ -166,7 +166,7 @@ func TestWSConn_HandleConn(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte(`{"jsonrpc":"2.0","error":{"code":null,"message":"error BlockAPI not set"},"id":1}`+"\n"), msg) - wsconn.BlockAPI = modules.NewMockBlockAPI() + wsconn.BlockAPI = modules.NewMockeryBlockAPI() res, err = wsconn.initBlockListener(1, nil) require.NoError(t, err) @@ -196,7 +196,7 @@ func TestWSConn_HandleConn(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte(`{"jsonrpc":"2.0","error":{"code":null,"message":"error BlockAPI not set"},"id":1}`+"\n"), msg) - wsconn.BlockAPI = modules.NewMockBlockAPI() + wsconn.BlockAPI = modules.NewMockeryBlockAPI() res, err = wsconn.initBlockFinalizedListener(1, nil) require.NoError(t, err) @@ -218,7 +218,7 @@ func TestWSConn_HandleConn(t *testing.T) { require.EqualError(t, err, "error BlockAPI not set") require.Nil(t, listner) - wsconn.BlockAPI = modules.NewMockBlockAPI() + wsconn.BlockAPI = modules.NewMockeryBlockAPI() listner, err = wsconn.initExtrinsicWatch(0, []interface{}{"0x26aa"}) require.NoError(t, err) require.NotNil(t, listner) diff --git a/dot/rpc/websocket_test.go b/dot/rpc/websocket_test.go index 0aceefb8a9..86fdbdef16 100644 --- a/dot/rpc/websocket_test.go +++ b/dot/rpc/websocket_test.go @@ -63,7 +63,7 @@ func TestHTTPServer_ServeHTTP(t *testing.T) { SystemName: "gossamer", } sysAPI := system.NewService(si, nil) - bAPI := modules.NewMockBlockAPI() + bAPI := modules.NewMockeryBlockAPI() sAPI := modules.NewMockStorageAPI() TxStateAPI := modules.NewMockTransactionStateAPI() diff --git a/tests/polkadotjs_test/package-lock.json b/tests/polkadotjs_test/package-lock.json index 25fce2c881..449ea2753c 100644 --- a/tests/polkadotjs_test/package-lock.json +++ b/tests/polkadotjs_test/package-lock.json @@ -1,293 +1,2556 @@ { "name": "apitest", "version": "1.0.0", - "lockfileVersion": 1, + "lockfileVersion": 2, "requires": true, + "packages": { + "": { + "name": "apitest", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@polkadot/api": "8.8.2" + }, + "devDependencies": { + "chai": "^4.2.0", + "mocha": "^8.2.1" + } + }, + "node_modules/@babel/runtime": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.18.3.tgz", + "integrity": "sha512-38Y8f7YUhce/K7RMwTp7m0uCumpv9hZkitCbBClqQIow1qSbCvGkcegKOXpEWCQLfWmevgRiWokZ1GkpfhbZug==", + "dependencies": { + "regenerator-runtime": "^0.13.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@noble/hashes": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.0.0.tgz", + "integrity": "sha512-DZVbtY62kc3kkBtMHqwCOfXrT/hnoORy5BJ4+HU1IR59X0KWAOqsfzQPcUl/lQLlG7qXbe/fZ3r/emxtAl+sqg==" + }, + "node_modules/@noble/secp256k1": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@noble/secp256k1/-/secp256k1-1.5.5.tgz", + "integrity": "sha512-sZ1W6gQzYnu45wPrWx8D3kwI2/U29VYTx9OjbDAd7jwRItJ0cSTMPRL/C8AWZFn9kWFLQGqEXVEE86w4Z8LpIQ==", + "funding": [ + { + "type": "individual", + "url": 
"https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@polkadot/api": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-8.8.2.tgz", + "integrity": "sha512-kqHYLGIivYAHGF0B19ApBANDrreUqeyXuqtNHxieQSe63yoAksyUbwTmdl58Z0WnvXg39fjXXNZzLXFt2/txIQ==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/api-augment": "8.8.2", + "@polkadot/api-base": "8.8.2", + "@polkadot/api-derive": "8.8.2", + "@polkadot/keyring": "^9.4.1", + "@polkadot/rpc-augment": "8.8.2", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/rpc-provider": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-augment": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/types-create": "8.8.2", + "@polkadot/types-known": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "eventemitter3": "^4.0.7", + "rxjs": "^7.5.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/api-augment": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api-augment/-/api-augment-8.8.2.tgz", + "integrity": "sha512-c99guuBvHrGbFBD9x32YG4Yc5osP1jVkGz/hlriRuTZNMUa/ZBjeoZtbVchL4PlpNC1sjdvvrIC9j3uQhvYHJQ==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/api-base": "8.8.2", + "@polkadot/rpc-augment": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-augment": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/api-base": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api-base/-/api-base-8.8.2.tgz", + "integrity": "sha512-V04Hw6WJhWGUr5m50lNWE/9ao7ZjcJq005kVMtMRdI94HLmKDMnS3M4EI6USGtLWQ0VOlIMmlp7k2R3SyVFwQA==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/util": "^9.4.1", + "rxjs": "^7.5.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/api-derive": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-8.8.2.tgz", + "integrity": "sha512-ltHft5kp+TFasolSSQlip6zQpw3WFinu6CQZRmcAAyGaM7QgNweIWh3ZdoigrjnZaJPraGWNCfJv0pSg+2j0vg==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/api": "8.8.2", + "@polkadot/api-augment": "8.8.2", + "@polkadot/api-base": "8.8.2", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "rxjs": "^7.5.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/keyring": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-9.4.1.tgz", + "integrity": "sha512-op6Tj8E9GHeZYvEss38FRUrX+GlBj6qiwF4BlFrAvPqjPnRn8TT9NhRLroiCwvxeNg3uMtEF/5xB+vvdI0I6qw==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/util": "9.4.1", + "@polkadot/util-crypto": "9.4.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "9.4.1", + "@polkadot/util-crypto": "9.4.1" + } + }, + "node_modules/@polkadot/networks": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/networks/-/networks-9.4.1.tgz", + "integrity": "sha512-ibH8bZ2/XMXv0XEsP1fGOqNnm2mg1rHo5kHXSJ3QBcZJFh1+xkI4Ovl2xrFfZ+SYATA3Wsl5R6knqimk2EqyJQ==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/util": "9.4.1", + "@substrate/ss58-registry": "^1.22.0" + }, + "engines": { + "node": ">=14.0.0" + } 
+ }, + "node_modules/@polkadot/rpc-augment": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-augment/-/rpc-augment-8.8.2.tgz", + "integrity": "sha512-z9rOSmPvcS/YQSJIhM5F2uLyYZ6azll35V9xGs19hypO5wkwzLYByLbXQ7j1SFI267q/IIXVnri0yI6mtsQgzA==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/rpc-core": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-8.8.2.tgz", + "integrity": "sha512-2MrIra52NYsvWv192sHM5b6dUXYYYzA8IB/rB7YF9Hm4aIDJbQJ/8uBivHZjMzyHsegxMDAe9WQSEkR0eagojQ==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/rpc-augment": "8.8.2", + "@polkadot/rpc-provider": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/util": "^9.4.1", + "rxjs": "^7.5.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/rpc-provider": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-8.8.2.tgz", + "integrity": "sha512-LzzTTOxmqDndOcYdukYkpfEBq3GlbKAOb2pisKF4CtcGPcZ6bG0vktwx6qlWQ+Apmdu98rabt+iQPfwvOSg8sA==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/keyring": "^9.4.1", + "@polkadot/types": "8.8.2", + "@polkadot/types-support": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "@polkadot/x-fetch": "^9.4.1", + "@polkadot/x-global": "^9.4.1", + "@polkadot/x-ws": "^9.4.1", + "@substrate/connect": "0.7.5", + "eventemitter3": "^4.0.7", + "mock-socket": "^9.1.5", + "nock": "^13.2.6" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/types": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-8.8.2.tgz", + "integrity": "sha512-O90MEfGbpPh/FmUAv0m3LcweZLWH6pmkODb1EGnwBHjZadYLCHFjdFO50yhoch9hh3+aEFmac6ma8swsy6IjAw==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/keyring": "^9.4.1", + "@polkadot/types-augment": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/types-create": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "rxjs": "^7.5.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/types-augment": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-augment/-/types-augment-8.8.2.tgz", + "integrity": "sha512-WalxIz5Z0RPp2FS0cWvhBjYL7FKzDqkIBc+r/DN4vYRQzp5JBVNJjPWWUPtq9ucEl1wiaD2vJNG34rWIYVtObg==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/types-codec": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-codec/-/types-codec-8.8.2.tgz", + "integrity": "sha512-p3YZU8WZIMnnSxTKpoiCPi64T/sSR7dX7ObkpvUITulE6dzXUPUvkdSVS9YlTlb4R43pZ0iSyB18vpnlpq8LYQ==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/util": "^9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/types-create": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-create/-/types-create-8.8.2.tgz", + "integrity": "sha512-YMpiLCVFs2KKpvn3n24HahUzneaLKmjgwwd+QvFCooJClV/0YK22kwvlEteLO3aWPx2jy8ySSpUFn8kd/oWEAA==", + "dependencies": { + "@babel/runtime": "^7.18.3", + 
"@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/types-known": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-known/-/types-known-8.8.2.tgz", + "integrity": "sha512-Ywa7v7K+UIYpQM3gbl6oA0zKiriX1OJfoYBxX7BcVLKW8cWmdy2xH9W6qNqxDWGAc2LXqNLhn0uzaRxq1niCCQ==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/networks": "^9.4.1", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/types-create": "8.8.2", + "@polkadot/util": "^9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/types-support": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-support/-/types-support-8.8.2.tgz", + "integrity": "sha512-z4yjN8odDgFFlhGBrJAeHX4YsUeprmBAzWDCJMBeL4C/E1yIG7RyzQryVJNb3m/galiX1Tzuuch4kqE/jABnfw==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/util": "^9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/util": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/util/-/util-9.4.1.tgz", + "integrity": "sha512-z0HcnIe3zMWyK1s09wQIwc1M8gDKygSF9tDAbC8H9KDeIRZB2ldhwWEFx/1DJGOgFFrmRfkxeC6dcDpfzQhFow==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-bigint": "9.4.1", + "@polkadot/x-global": "9.4.1", + "@polkadot/x-textdecoder": "9.4.1", + "@polkadot/x-textencoder": "9.4.1", + "@types/bn.js": "^5.1.0", + "bn.js": "^5.2.1", + "ip-regex": "^4.3.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/util-crypto": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-9.4.1.tgz", + "integrity": "sha512-V6xMOjdd8Kt/QmXlcDYM4WJDAmKuH4vWSlIcMmkFHnwH/NtYVdYIDZswLQHKL8gjLijPfVTHpWaJqNFhGpZJEg==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@noble/hashes": "1.0.0", + "@noble/secp256k1": "1.5.5", + "@polkadot/networks": "9.4.1", + "@polkadot/util": "9.4.1", + "@polkadot/wasm-crypto": "^6.1.1", + "@polkadot/x-bigint": "9.4.1", + "@polkadot/x-randomvalues": "9.4.1", + "@scure/base": "1.0.0", + "ed2curve": "^0.3.0", + "tweetnacl": "^1.0.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "9.4.1" + } + }, + "node_modules/@polkadot/wasm-bridge": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-bridge/-/wasm-bridge-6.1.1.tgz", + "integrity": "sha512-Cy0k00VCu+HWxie+nn9GWPlSPdiZl8Id8ulSGA2FKET0jIbffmOo4e1E2FXNucfR1UPEpqov5BCF9T5YxEXZDg==", + "dependencies": { + "@babel/runtime": "^7.17.9" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "*", + "@polkadot/x-randomvalues": "*" + } + }, + "node_modules/@polkadot/wasm-crypto": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-6.1.1.tgz", + "integrity": "sha512-hv9RCbMYtgjCy7+FKZFnO2Afu/whax9sk6udnZqGRBRiwaNagtyliWZGrKNGvaXMIO0VyaY4jWUwSzUgPrLu1A==", + "dependencies": { + "@babel/runtime": "^7.17.9", + "@polkadot/wasm-bridge": "6.1.1", + "@polkadot/wasm-crypto-asmjs": "6.1.1", + "@polkadot/wasm-crypto-init": "6.1.1", + "@polkadot/wasm-crypto-wasm": "6.1.1", + "@polkadot/wasm-util": "6.1.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/wasm-crypto-asmjs": { + "version": "6.1.1", + "resolved": 
"https://registry.npmjs.org/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-6.1.1.tgz", + "integrity": "sha512-gG4FStVumkyRNH7WcTB+hn3EEwCssJhQyi4B1BOUt+eYYmw9xJdzIhqjzSd9b/yF2e5sRaAzfnMj2srGufsE6A==", + "dependencies": { + "@babel/runtime": "^7.17.9" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/wasm-crypto-init": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-init/-/wasm-crypto-init-6.1.1.tgz", + "integrity": "sha512-rbBm/9FOOUjISL4gGNokjcKy2X+Af6Chaet4zlabatpImtPIAK26B2UUBGoaRUnvl/w6K3+GwBL4LuBC+CvzFw==", + "dependencies": { + "@babel/runtime": "^7.17.9", + "@polkadot/wasm-bridge": "6.1.1", + "@polkadot/wasm-crypto-asmjs": "6.1.1", + "@polkadot/wasm-crypto-wasm": "6.1.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/wasm-crypto-wasm": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-6.1.1.tgz", + "integrity": "sha512-zkz5Ct4KfTBT+YNEA5qbsHhTV58/FAxDave8wYIOaW4TrBnFPPs+J0WBWlGFertgIhPkvjFnQC/xzRyhet9prg==", + "dependencies": { + "@babel/runtime": "^7.17.9", + "@polkadot/wasm-util": "6.1.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/wasm-util": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-util/-/wasm-util-6.1.1.tgz", + "integrity": "sha512-DgpLoFXMT53UKcfZ8eT2GkJlJAOh89AWO+TP6a6qeZQpvXVe5f1yR45WQpkZlgZyUP+/19+kY56GK0pQxfslqg==", + "dependencies": { + "@babel/runtime": "^7.17.9" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "@polkadot/util": "*" + } + }, + "node_modules/@polkadot/x-bigint": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-bigint/-/x-bigint-9.4.1.tgz", + "integrity": "sha512-KlbXboegENoyrpjj+eXfY13vsqrXgk4620zCAUhKNH622ogdvAepHbY/DpV6w0FLEC6MwN9zd5cRuDBEXVeWiw==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/x-fetch": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-fetch/-/x-fetch-9.4.1.tgz", + "integrity": "sha512-CZFPZKgy09TOF5pOFRVVhGrAaAPdSMyrUSKwdO2I8DzdIE1tmjnol50dlnZja5t8zTD0n1uIY1H4CEWwc5NF/g==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1", + "@types/node-fetch": "^2.6.1", + "node-fetch": "^2.6.7" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/x-global": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-global/-/x-global-9.4.1.tgz", + "integrity": "sha512-eN4oZeRdIKQeUPNN7OtH5XeYp349d8V9+gW6W0BmCfB2lTg8TDlG1Nj+Cyxpjl9DNF5CiKudTq72zr0dDSRbwA==", + "dependencies": { + "@babel/runtime": "^7.18.3" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/x-randomvalues": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-randomvalues/-/x-randomvalues-9.4.1.tgz", + "integrity": "sha512-TLOQw3JNPgCrcq9WO2ipdeG8scsSreu3m9hwj3n7nX/QKlVzSf4G5bxJo5TW1dwcUdHwBuVox+3zgCmo+NPh+Q==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/x-textdecoder": { + "version": "9.4.1", + "resolved": 
"https://registry.npmjs.org/@polkadot/x-textdecoder/-/x-textdecoder-9.4.1.tgz", + "integrity": "sha512-yLulcgVASFUBJqrvS6Ssy0ko9teAfbu1ajH0r3Jjnqkpmmz2DJ1CS7tAktVa7THd4GHPGeKAVfxl+BbV/LZl+w==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/x-textencoder": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-textencoder/-/x-textencoder-9.4.1.tgz", + "integrity": "sha512-/47wa31jBa43ULqMO60vzcJigTG+ZAGNcyT5r6hFLrQzRzc8nIBjIOD8YWtnKM92r9NvlNv2wJhdamqyU0mntg==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@polkadot/x-ws": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-ws/-/x-ws-9.4.1.tgz", + "integrity": "sha512-zQjVxXgHsBVn27u4bjY01cFO6XWxgv2b3MMOpNHTKTAs8SLEmFf0LcT7fBShimyyudyTeJld5pHApJ4qp1OXxA==", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1", + "@types/websocket": "^1.0.5", + "websocket": "^1.0.34" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@scure/base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.0.0.tgz", + "integrity": "sha512-gIVaYhUsy+9s58m/ETjSJVKHhKTBMmcRb9cEV5/5dwvfDlfORjKrFsDeDHWRrm6RjcPvCLZFwGJjAjLj1gg4HA==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@substrate/connect": { + "version": "0.7.5", + "resolved": "https://registry.npmjs.org/@substrate/connect/-/connect-0.7.5.tgz", + "integrity": "sha512-sdAZ6IGuTNxRGlH/O+6IaXvkYzZFwMK03VbQMgxUzry9dz1+JzyaNf8iOTVHxhMIUZc0h0E90JQz/hNiUYPlUw==", + "dependencies": { + "@substrate/connect-extension-protocol": "^1.0.0", + "@substrate/smoldot-light": "0.6.16", + "eventemitter3": "^4.0.7" + } + }, + "node_modules/@substrate/connect-extension-protocol": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@substrate/connect-extension-protocol/-/connect-extension-protocol-1.0.0.tgz", + "integrity": "sha512-nFVuKdp71hMd/MGlllAOh+a2hAqt8m6J2G0aSsS/RcALZexxF9jodbFc62ni8RDtJboeOfXAHhenYOANvJKPIg==" + }, + "node_modules/@substrate/smoldot-light": { + "version": "0.6.16", + "resolved": "https://registry.npmjs.org/@substrate/smoldot-light/-/smoldot-light-0.6.16.tgz", + "integrity": "sha512-Ej0ZdNPTW0EXbp45gv/5Kt/JV+c9cmRZRYAXg+EALxXPm0hW9h2QdVLm61A2PAskOGptW4wnJ1WzzruaenwAXQ==", + "dependencies": { + "buffer": "^6.0.1", + "pako": "^2.0.4", + "websocket": "^1.0.32" + } + }, + "node_modules/@substrate/ss58-registry": { + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/@substrate/ss58-registry/-/ss58-registry-1.22.0.tgz", + "integrity": "sha512-IKqrPY0B3AeIXEc5/JGgEhPZLy+SmVyQf+k0SIGcNSTqt1GLI3gQFEOFwSScJdem+iYZQUrn6YPPxC3TpdSC3A==" + }, + "node_modules/@types/bn.js": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.0.tgz", + "integrity": "sha512-QSSVYj7pYFN49kW77o2s9xTCwZ8F2xLbjLLSEVh8D2F4JUhZtPAGOFLTD+ffqksBx/u4cE/KImFjyhqCjn/LIA==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/node": { + "version": "17.0.43", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.43.tgz", + "integrity": "sha512-jnUpgw8fL9kP2iszfIDyBQtw5Mf4/XSqy0Loc1J9pI14ejL83XcCEvSf50Gs/4ET0I9VCCDoOfufQysj0S66xA==" + }, + "node_modules/@types/node-fetch": { + "version": "2.6.1", + "resolved": 
"https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.1.tgz", + "integrity": "sha512-oMqjURCaxoSIsHSr1E47QHzbmzNR5rK8McHuNb11BOM9cHcIK3Avy0s/b2JlXHoQGTYS3NsvWzV1M0iK7l0wbA==", + "dependencies": { + "@types/node": "*", + "form-data": "^3.0.0" + } + }, + "node_modules/@types/websocket": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/websocket/-/websocket-1.0.5.tgz", + "integrity": "sha512-NbsqiNX9CnEfC1Z0Vf4mE1SgAJ07JnRYcNex7AJ9zAVzmiGHmjKFEk7O4TJIsgv2B1sLEb6owKFZrACwdYngsQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", + "dev": true + }, + "node_modules/ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-regex": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.1.tgz", + "integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", + "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/assertion-error": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": 
"sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/bn.js": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", + "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==" + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/bufferutil": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/bufferutil/-/bufferutil-4.0.6.tgz", + "integrity": "sha512-jduaYOYtnio4aIAyc6UbvPCVcgq7nYpVnucyxr6eCYg/Woad9Hf/oxxBRDnGGjPfjUm6j5O/uBWhIu4iLebFaw==", + "hasInstallScript": true, + "dependencies": { + "node-gyp-build": "^4.3.0" + }, + "engines": { + "node": ">=6.14.2" + } + }, + "node_modules/chai": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.4.tgz", + "integrity": "sha512-yS5H68VYOCtN1cjfwumDSuzn/9c+yza4f3reKXlE5rUg7SFcCEy90gJvydNgOYtblyf4Zi6jIWRnXOgErta0KA==", + "dev": true, + "dependencies": { + "assertion-error": "^1.1.0", + "check-error": "^1.0.2", + "deep-eql": "^3.0.1", + "get-func-name": "^2.0.0", + "pathval": "^1.1.1", + "type-detect": "^4.0.5" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chalk": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", + "integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + 
"supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/check-error": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", + "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/chokidar": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", + "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", + "dev": true, + "dependencies": { + "anymatch": "~3.1.1", + "braces": "~3.0.2", + "glob-parent": "~5.1.0", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.5.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.1" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", + "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": 
{ + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "node_modules/d": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", + "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", + "dependencies": { + "es5-ext": "^0.10.50", + "type": "^1.0.1" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-eql": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", + "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", + "dev": true, + "dependencies": { + "type-detect": "^4.0.0" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/diff": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/ed2curve": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/ed2curve/-/ed2curve-0.3.0.tgz", + "integrity": "sha512-8w2fmmq3hv9rCrcI7g9hms2pMunQr1JINfcjwR9tAyZqhtyaMN991lF/ZfHfr5tzZQ8c7y7aBgZbjfbd0fjFwQ==", + "dependencies": { + "tweetnacl": "1.x.x" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/es5-ext": { + "version": "0.10.61", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.61.tgz", + "integrity": 
"sha512-yFhIqQAzu2Ca2I4SE2Au3rxVfmohU9Y7wqGR+s7+H7krk26NXhIRAZDgqd6xqjCEFUomDEA3/Bo/7fKmIkW1kA==", + "hasInstallScript": true, + "dependencies": { + "es6-iterator": "^2.0.3", + "es6-symbol": "^3.1.3", + "next-tick": "^1.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/es6-iterator": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", + "integrity": "sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==", + "dependencies": { + "d": "1", + "es5-ext": "^0.10.35", + "es6-symbol": "^3.1.1" + } + }, + "node_modules/es6-symbol": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", + "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", + "dependencies": { + "d": "^1.0.1", + "ext": "^1.1.2" + } + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/ext": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/ext/-/ext-1.6.0.tgz", + "integrity": "sha512-sdBImtzkq2HpkdRLtlLWDa6w4DX22ijZLKx8BMPUuKe1c5lbN6xwQDQCxSfxBQnHZ13ls/FH0MQZx/q/gr6FQg==", + "dependencies": { + "type": "^2.5.0" + } + }, + "node_modules/ext/node_modules/type": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/type/-/type-2.6.0.tgz", + "integrity": "sha512-eiDBDOmkih5pMbo9OqsqPRGMljLodLcwd5XD5JbtNB0o89xZAwynY9EdCDsJU7LtcVCClu9DvM7/0Ep1hYX3EQ==" + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "bin": { + "flat": "cli.js" + } + }, + "node_modules/form-data": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz", + "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-func-name": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", + "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true, + "engines": { + "node": ">=4.x" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "bin": { + "he": "bin/he" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { 
+ "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/ip-regex": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.3.0.tgz", + "integrity": "sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", + "integrity": "sha512-pqon0s+4ScYUvX30wxQi3PogGFAlUyH0awepWvwkj4jD4v+ova3RiYw8bmA6x2rDrEaj8i/oWKoRxpVNW+Re8Q==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": 
"bin/js-yaml.js" + } + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/log-symbols": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", + "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/mocha": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-8.4.0.tgz", + "integrity": "sha512-hJaO0mwDXmZS4ghXsvPVriOhsxQ7ofcpQdm8dE+jISUOKopitvnXFQmpRR7jd2K6VBG6E26gU3IAbXXGIbu4sQ==", + "dev": true, + "dependencies": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.1", + "debug": "4.3.1", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.6", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "4.0.0", + "log-symbols": "4.0.0", + "minimatch": "3.0.4", + "ms": "2.1.3", + "nanoid": "3.1.20", + "serialize-javascript": "5.0.1", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "wide-align": "1.1.3", + "workerpool": "6.1.0", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha" + }, + "engines": { + "node": ">= 10.12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" + } + }, + "node_modules/mocha/node_modules/debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": 
"sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/mocha/node_modules/debug/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/mocha/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "node_modules/mock-socket": { + "version": "9.1.5", + "resolved": "https://registry.npmjs.org/mock-socket/-/mock-socket-9.1.5.tgz", + "integrity": "sha512-3DeNIcsQixWHHKk6NdoBhWI4t1VMj5/HzfnI1rE/pLl5qKx7+gd4DNA07ehTaZ6MoUU053si6Hd+YtiM/tQZfg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/nanoid": { + "version": "3.1.20", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.20.tgz", + "integrity": "sha512-a1cQNyczgKbLX9jwbS/+d7W8fX/RfgYR7lVWwWOGIPNgK2m0MWvrGF6/m4kk6U3QcFMnZf3RIhL0v2Jgh/0Uxw==", + "dev": true, + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/next-tick": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz", + "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==" + }, + "node_modules/nock": { + "version": "13.2.7", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.2.7.tgz", + "integrity": "sha512-R6NUw7RIPtKwgK7jskuKoEi4VFMqIHtV2Uu9K/Uegc4TA5cqe+oNMYslZcUmnVNQCTG6wcSqUBaGTDd7sq5srg==", + "dependencies": { + "debug": "^4.1.0", + "json-stringify-safe": "^5.0.1", + "lodash": "^4.17.21", + "propagate": "^2.0.0" + }, + "engines": { + "node": ">= 10.13" + } + }, + "node_modules/node-fetch": { + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-gyp-build": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.4.0.tgz", + "integrity": "sha512-amJnQCcgtRVw9SvoebO3BKGESClrfXGCUTX9hSn1OuGQTQBOZmVd0Z0OlecpuRksKvbsUqALE8jls/ErClAPuQ==", + "bin": { + "node-gyp-build": "bin.js", + "node-gyp-build-optional": "optional.js", + "node-gyp-build-test": "build-test.js" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + 
"version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pako": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pako/-/pako-2.0.4.tgz", + "integrity": "sha512-v8tweI900AUkZN6heMU/4Uy4cXRc2AYNRggVmTR+dEncawDJgCdLMximOVA2p4qO57WMynangsfGRb5WD6L1Bg==" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pathval": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", + "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/picomatch": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", + "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/propagate": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/propagate/-/propagate-2.0.1.tgz", + "integrity": "sha512-vGrhOavPSTz4QVNuBNdcNXePNdNMaO1xj9yBeH1ScQPjk/rhg9sSlCXPhMkFuaNNW/syTvYqsnbIJxMBfRbbag==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", + "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.13.9", + "resolved": 
"https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", + "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/rxjs": { + "version": "7.5.5", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.5.5.tgz", + "integrity": "sha512-sy+H0pQofO95VDmFLzyaw9xNJU4KTRSwQIGM6+iG3SypAtCiLDzpeG8sJrNCWn2Up9km+KhkvTdbkrdy+yzZdw==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/serialize-javascript": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-5.0.1.tgz", + "integrity": "sha512-SaaNal9imEO737H2c05Og0/8LUXG7EnsZyMa8MzkmuHoELfT6txuj0cMqRj6zfPKnmQ1yasR4PCJc8x+M4JSPA==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "dependencies": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": 
"https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/tslib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", + "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" + }, + "node_modules/tweetnacl": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", + "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" + }, + "node_modules/type": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", + "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/utf-8-validate": { + "version": "5.0.9", + "resolved": "https://registry.npmjs.org/utf-8-validate/-/utf-8-validate-5.0.9.tgz", + "integrity": "sha512-Yek7dAy0v3Kl0orwMlvi7TPtiCNrdfHNd7Gcc/pLq4BLXqfAmd0J7OWMizUQnTTJsyjKn02mU7anqwfmUP4J8Q==", + "hasInstallScript": true, + "dependencies": { + "node-gyp-build": "^4.3.0" + }, + "engines": { + "node": ">=6.14.2" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/websocket": { + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/websocket/-/websocket-1.0.34.tgz", + "integrity": "sha512-PRDso2sGwF6kM75QykIesBijKSVceR6jL2G8NGYyq2XrItNC2P5/qL5XeR056GhA+Ly7JMFvJb9I312mJfmqnQ==", + "dependencies": { + "bufferutil": "^4.0.1", + "debug": "^2.2.0", + "es5-ext": "^0.10.50", + "typedarray-to-buffer": "^3.1.5", + "utf-8-validate": "^5.0.2", + "yaeti": "^0.0.6" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/websocket/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/websocket/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + 
"version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wide-align": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", + "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "dev": true, + "dependencies": { + "string-width": "^1.0.2 || 2" + } + }, + "node_modules/workerpool": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.0.tgz", + "integrity": "sha512-toV7q9rWNYha963Pl/qyeZ6wG+3nnsyvolaNUS8+R5Wtw6qJPTxIlOP1ZSvcGhEJw+l3HMMmtiNo9Gl61G4GVg==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", + "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yaeti": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/yaeti/-/yaeti-0.0.6.tgz", + "integrity": 
"sha512-MvQa//+KcZCUkBTIC9blM+CU9J2GzuTytsOUwf2lidtvkx/6gnEp1QvJv34t9vdjhFmha/mUiNDbN0D0mJWdug==", + "engines": { + "node": ">=0.10.32" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser/node_modules/camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", + "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + 
"node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + }, "dependencies": { "@babel/runtime": { - "version": "7.14.6", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.14.6.tgz", - "integrity": "sha512-/PCB2uJ7oM44tz8YhC4Z/6PeOKXp4K588f+5M3clr1M4zbqztlo0XEfJ2LEzj/FgwfgGcIdl8n7YYjTCI0BYwg==", + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.18.3.tgz", + "integrity": "sha512-38Y8f7YUhce/K7RMwTp7m0uCumpv9hZkitCbBClqQIow1qSbCvGkcegKOXpEWCQLfWmevgRiWokZ1GkpfhbZug==", "requires": { "regenerator-runtime": "^0.13.4" } }, + "@noble/hashes": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.0.0.tgz", + "integrity": "sha512-DZVbtY62kc3kkBtMHqwCOfXrT/hnoORy5BJ4+HU1IR59X0KWAOqsfzQPcUl/lQLlG7qXbe/fZ3r/emxtAl+sqg==" + }, + "@noble/secp256k1": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@noble/secp256k1/-/secp256k1-1.5.5.tgz", + "integrity": "sha512-sZ1W6gQzYnu45wPrWx8D3kwI2/U29VYTx9OjbDAd7jwRItJ0cSTMPRL/C8AWZFn9kWFLQGqEXVEE86w4Z8LpIQ==" + }, "@polkadot/api": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-4.5.1.tgz", - "integrity": "sha512-b9CBG1ZGhyFwXDiVP0vKZbY8RdW2rbtHxw3BYPYUZ4bk6NVsDCk7vPD2z3B19RxHOv7Chkjtx+b5MU6ASfKRhg==", - "requires": { - "@babel/runtime": "^7.13.10", - "@polkadot/api-derive": "4.5.1", - "@polkadot/keyring": "^6.1.1", - "@polkadot/metadata": "4.5.1", - "@polkadot/rpc-core": "4.5.1", - "@polkadot/rpc-provider": "4.5.1", - "@polkadot/types": "4.5.1", - "@polkadot/types-known": "4.5.1", - "@polkadot/util": "^6.1.1", - "@polkadot/util-crypto": "^6.1.1", - "@polkadot/x-rxjs": "^6.1.1", - "bn.js": "^4.11.9", - "eventemitter3": "^4.0.7" + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-8.8.2.tgz", + "integrity": "sha512-kqHYLGIivYAHGF0B19ApBANDrreUqeyXuqtNHxieQSe63yoAksyUbwTmdl58Z0WnvXg39fjXXNZzLXFt2/txIQ==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/api-augment": "8.8.2", + "@polkadot/api-base": "8.8.2", + "@polkadot/api-derive": "8.8.2", + "@polkadot/keyring": "^9.4.1", + "@polkadot/rpc-augment": "8.8.2", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/rpc-provider": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-augment": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/types-create": "8.8.2", + "@polkadot/types-known": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "eventemitter3": "^4.0.7", + "rxjs": "^7.5.5" + } + }, + "@polkadot/api-augment": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api-augment/-/api-augment-8.8.2.tgz", + "integrity": "sha512-c99guuBvHrGbFBD9x32YG4Yc5osP1jVkGz/hlriRuTZNMUa/ZBjeoZtbVchL4PlpNC1sjdvvrIC9j3uQhvYHJQ==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/api-base": "8.8.2", + "@polkadot/rpc-augment": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-augment": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" + } + }, + "@polkadot/api-base": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api-base/-/api-base-8.8.2.tgz", + "integrity": "sha512-V04Hw6WJhWGUr5m50lNWE/9ao7ZjcJq005kVMtMRdI94HLmKDMnS3M4EI6USGtLWQ0VOlIMmlp7k2R3SyVFwQA==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/util": "^9.4.1", + "rxjs": "^7.5.5" } }, "@polkadot/api-derive": { - "version": "4.5.1", - 
"resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-4.5.1.tgz", - "integrity": "sha512-La2FWlwWpjDv5F+TLCxm+air2LINNrav0nCq62bzZ4uaIlWI8yN2W7ejtT29vuDK8DY46qemOZ/7ZA2wKeylEg==", - "requires": { - "@babel/runtime": "^7.13.10", - "@polkadot/api": "4.5.1", - "@polkadot/rpc-core": "4.5.1", - "@polkadot/types": "4.5.1", - "@polkadot/util": "^6.1.1", - "@polkadot/util-crypto": "^6.1.1", - "@polkadot/x-rxjs": "^6.1.1", - "bn.js": "^4.11.9" + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-8.8.2.tgz", + "integrity": "sha512-ltHft5kp+TFasolSSQlip6zQpw3WFinu6CQZRmcAAyGaM7QgNweIWh3ZdoigrjnZaJPraGWNCfJv0pSg+2j0vg==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/api": "8.8.2", + "@polkadot/api-augment": "8.8.2", + "@polkadot/api-base": "8.8.2", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "rxjs": "^7.5.5" } }, "@polkadot/keyring": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-6.11.1.tgz", - "integrity": "sha512-rW8INl7pO6Dmaffd6Df1yAYCRWa2RmWQ0LGfJeA/M6seVIkI6J3opZqAd4q2Op+h9a7z4TESQGk8yggOEL+Csg==", + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-9.4.1.tgz", + "integrity": "sha512-op6Tj8E9GHeZYvEss38FRUrX+GlBj6qiwF4BlFrAvPqjPnRn8TT9NhRLroiCwvxeNg3uMtEF/5xB+vvdI0I6qw==", "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/util": "6.11.1", - "@polkadot/util-crypto": "6.11.1" + "@babel/runtime": "^7.18.3", + "@polkadot/util": "9.4.1", + "@polkadot/util-crypto": "9.4.1" } }, - "@polkadot/metadata": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@polkadot/metadata/-/metadata-4.5.1.tgz", - "integrity": "sha512-DLbeDx1MiYJaZJLG4YrM/YQXilqHCxuyxuN4H7UZ6UrnE1E1Tariz0B/EhJE7gSR5kCGCxscLIvO0pxJGGGSCA==", + "@polkadot/networks": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/networks/-/networks-9.4.1.tgz", + "integrity": "sha512-ibH8bZ2/XMXv0XEsP1fGOqNnm2mg1rHo5kHXSJ3QBcZJFh1+xkI4Ovl2xrFfZ+SYATA3Wsl5R6knqimk2EqyJQ==", "requires": { - "@babel/runtime": "^7.13.10", - "@polkadot/types": "4.5.1", - "@polkadot/types-known": "4.5.1", - "@polkadot/util": "^6.1.1", - "@polkadot/util-crypto": "^6.1.1", - "bn.js": "^4.11.9" + "@babel/runtime": "^7.18.3", + "@polkadot/util": "9.4.1", + "@substrate/ss58-registry": "^1.22.0" } }, - "@polkadot/networks": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/networks/-/networks-6.11.1.tgz", - "integrity": "sha512-0C6Ha2kvr42se3Gevx6UhHzv3KnPHML0N73Amjwvdr4y0HLZ1Nfw+vcm5yqpz5gpiehqz97XqFrsPRauYdcksQ==", + "@polkadot/rpc-augment": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-augment/-/rpc-augment-8.8.2.tgz", + "integrity": "sha512-z9rOSmPvcS/YQSJIhM5F2uLyYZ6azll35V9xGs19hypO5wkwzLYByLbXQ7j1SFI267q/IIXVnri0yI6mtsQgzA==", "requires": { - "@babel/runtime": "^7.14.6" + "@babel/runtime": "^7.18.3", + "@polkadot/rpc-core": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" } }, "@polkadot/rpc-core": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-4.5.1.tgz", - "integrity": "sha512-nYbFY2U7h0p39EE7OZLhnrHnLIghWnz13hCdY4ApFTVFrPGq83z2zBAObyhbELxXw+kFQf867svbpvcpB9MclA==", + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-8.8.2.tgz", + 
"integrity": "sha512-2MrIra52NYsvWv192sHM5b6dUXYYYzA8IB/rB7YF9Hm4aIDJbQJ/8uBivHZjMzyHsegxMDAe9WQSEkR0eagojQ==", "requires": { - "@babel/runtime": "^7.13.10", - "@polkadot/metadata": "4.5.1", - "@polkadot/rpc-provider": "4.5.1", - "@polkadot/types": "4.5.1", - "@polkadot/util": "^6.1.1", - "@polkadot/x-rxjs": "^6.1.1" + "@babel/runtime": "^7.18.3", + "@polkadot/rpc-augment": "8.8.2", + "@polkadot/rpc-provider": "8.8.2", + "@polkadot/types": "8.8.2", + "@polkadot/util": "^9.4.1", + "rxjs": "^7.5.5" } }, "@polkadot/rpc-provider": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-4.5.1.tgz", - "integrity": "sha512-XaXFf6+rqV+E9uq5AYtYkl8cSqq0yb3LrMSTWGklHE5Fi1yJZWhTpnpjslmatPsyEMdZhQmsLd5rOER1ua7wCw==", - "requires": { - "@babel/runtime": "^7.13.10", - "@polkadot/types": "4.5.1", - "@polkadot/util": "^6.1.1", - "@polkadot/util-crypto": "^6.1.1", - "@polkadot/x-fetch": "^6.1.1", - "@polkadot/x-global": "^6.1.1", - "@polkadot/x-ws": "^6.1.1", - "bn.js": "^4.11.9", - "eventemitter3": "^4.0.7" + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-8.8.2.tgz", + "integrity": "sha512-LzzTTOxmqDndOcYdukYkpfEBq3GlbKAOb2pisKF4CtcGPcZ6bG0vktwx6qlWQ+Apmdu98rabt+iQPfwvOSg8sA==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/keyring": "^9.4.1", + "@polkadot/types": "8.8.2", + "@polkadot/types-support": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "@polkadot/x-fetch": "^9.4.1", + "@polkadot/x-global": "^9.4.1", + "@polkadot/x-ws": "^9.4.1", + "@substrate/connect": "0.7.5", + "eventemitter3": "^4.0.7", + "mock-socket": "^9.1.5", + "nock": "^13.2.6" } }, "@polkadot/types": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-4.5.1.tgz", - "integrity": "sha512-EcRdhk4od9e1ju6/upK02nvJ/eji5DOe4vA5YzdvIls98M4H0TgRNr9x6FE+WWPBIeJFrskarINA9ErCCpkQIA==", + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-8.8.2.tgz", + "integrity": "sha512-O90MEfGbpPh/FmUAv0m3LcweZLWH6pmkODb1EGnwBHjZadYLCHFjdFO50yhoch9hh3+aEFmac6ma8swsy6IjAw==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/keyring": "^9.4.1", + "@polkadot/types-augment": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/types-create": "8.8.2", + "@polkadot/util": "^9.4.1", + "@polkadot/util-crypto": "^9.4.1", + "rxjs": "^7.5.5" + } + }, + "@polkadot/types-augment": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-augment/-/types-augment-8.8.2.tgz", + "integrity": "sha512-WalxIz5Z0RPp2FS0cWvhBjYL7FKzDqkIBc+r/DN4vYRQzp5JBVNJjPWWUPtq9ucEl1wiaD2vJNG34rWIYVtObg==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" + } + }, + "@polkadot/types-codec": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-codec/-/types-codec-8.8.2.tgz", + "integrity": "sha512-p3YZU8WZIMnnSxTKpoiCPi64T/sSR7dX7ObkpvUITulE6dzXUPUvkdSVS9YlTlb4R43pZ0iSyB18vpnlpq8LYQ==", "requires": { - "@babel/runtime": "^7.13.10", - "@polkadot/metadata": "4.5.1", - "@polkadot/util": "^6.1.1", - "@polkadot/util-crypto": "^6.1.1", - "@polkadot/x-rxjs": "^6.1.1", - "@types/bn.js": "^4.11.6", - "bn.js": "^4.11.9" + "@babel/runtime": "^7.18.3", + "@polkadot/util": "^9.4.1" + } + }, + "@polkadot/types-create": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-create/-/types-create-8.8.2.tgz", + 
"integrity": "sha512-YMpiLCVFs2KKpvn3n24HahUzneaLKmjgwwd+QvFCooJClV/0YK22kwvlEteLO3aWPx2jy8ySSpUFn8kd/oWEAA==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/types-codec": "8.8.2", + "@polkadot/util": "^9.4.1" } }, "@polkadot/types-known": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@polkadot/types-known/-/types-known-4.5.1.tgz", - "integrity": "sha512-CY57/cMCxaaHm3/bQHi2U9QNjbJOm19Krj6J6DCrnBphHaxZXANcZpLAC9LGaQ61gLeItSPb2i9skH9BoOL+sQ==", + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-known/-/types-known-8.8.2.tgz", + "integrity": "sha512-Ywa7v7K+UIYpQM3gbl6oA0zKiriX1OJfoYBxX7BcVLKW8cWmdy2xH9W6qNqxDWGAc2LXqNLhn0uzaRxq1niCCQ==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/networks": "^9.4.1", + "@polkadot/types": "8.8.2", + "@polkadot/types-codec": "8.8.2", + "@polkadot/types-create": "8.8.2", + "@polkadot/util": "^9.4.1" + } + }, + "@polkadot/types-support": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/@polkadot/types-support/-/types-support-8.8.2.tgz", + "integrity": "sha512-z4yjN8odDgFFlhGBrJAeHX4YsUeprmBAzWDCJMBeL4C/E1yIG7RyzQryVJNb3m/galiX1Tzuuch4kqE/jABnfw==", "requires": { - "@babel/runtime": "^7.13.10", - "@polkadot/networks": "^6.1.1", - "@polkadot/types": "4.5.1", - "@polkadot/util": "^6.1.1", - "bn.js": "^4.11.9" + "@babel/runtime": "^7.18.3", + "@polkadot/util": "^9.4.1" } }, "@polkadot/util": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/util/-/util-6.11.1.tgz", - "integrity": "sha512-TEdCetr9rsdUfJZqQgX/vxLuV4XU8KMoKBMJdx+JuQ5EWemIdQkEtMBdL8k8udNGbgSNiYFA6rPppATeIxAScg==", - "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/x-textdecoder": "6.11.1", - "@polkadot/x-textencoder": "6.11.1", - "@types/bn.js": "^4.11.6", - "bn.js": "^4.11.9", - "camelcase": "^5.3.1", + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/util/-/util-9.4.1.tgz", + "integrity": "sha512-z0HcnIe3zMWyK1s09wQIwc1M8gDKygSF9tDAbC8H9KDeIRZB2ldhwWEFx/1DJGOgFFrmRfkxeC6dcDpfzQhFow==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-bigint": "9.4.1", + "@polkadot/x-global": "9.4.1", + "@polkadot/x-textdecoder": "9.4.1", + "@polkadot/x-textencoder": "9.4.1", + "@types/bn.js": "^5.1.0", + "bn.js": "^5.2.1", "ip-regex": "^4.3.0" } }, "@polkadot/util-crypto": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-6.11.1.tgz", - "integrity": "sha512-fWA1Nz17FxWJslweZS4l0Uo30WXb5mYV1KEACVzM+BSZAvG5eoiOAYX6VYZjyw6/7u53XKrWQlD83iPsg3KvZw==", - "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/networks": "6.11.1", - "@polkadot/util": "6.11.1", - "@polkadot/wasm-crypto": "^4.0.2", - "@polkadot/x-randomvalues": "6.11.1", - "base-x": "^3.0.8", - "base64-js": "^1.5.1", - "blakejs": "^1.1.1", - "bn.js": "^4.11.9", - "create-hash": "^1.2.0", - "elliptic": "^6.5.4", - "hash.js": "^1.1.7", - "js-sha3": "^0.8.0", - "scryptsy": "^2.1.0", - "tweetnacl": "^1.0.3", - "xxhashjs": "^0.2.2" + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-9.4.1.tgz", + "integrity": "sha512-V6xMOjdd8Kt/QmXlcDYM4WJDAmKuH4vWSlIcMmkFHnwH/NtYVdYIDZswLQHKL8gjLijPfVTHpWaJqNFhGpZJEg==", + "requires": { + "@babel/runtime": "^7.18.3", + "@noble/hashes": "1.0.0", + "@noble/secp256k1": "1.5.5", + "@polkadot/networks": "9.4.1", + "@polkadot/util": "9.4.1", + "@polkadot/wasm-crypto": "^6.1.1", + "@polkadot/x-bigint": "9.4.1", + "@polkadot/x-randomvalues": "9.4.1", + "@scure/base": 
"1.0.0", + "ed2curve": "^0.3.0", + "tweetnacl": "^1.0.3" + } + }, + "@polkadot/wasm-bridge": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-bridge/-/wasm-bridge-6.1.1.tgz", + "integrity": "sha512-Cy0k00VCu+HWxie+nn9GWPlSPdiZl8Id8ulSGA2FKET0jIbffmOo4e1E2FXNucfR1UPEpqov5BCF9T5YxEXZDg==", + "requires": { + "@babel/runtime": "^7.17.9" } }, "@polkadot/wasm-crypto": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-4.0.2.tgz", - "integrity": "sha512-2h9FuQFkBc+B3TwSapt6LtyPvgtd0Hq9QsHW8g8FrmKBFRiiFKYRpfJKHCk0aCZzuRf9h95bQl/X6IXAIWF2ng==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-6.1.1.tgz", + "integrity": "sha512-hv9RCbMYtgjCy7+FKZFnO2Afu/whax9sk6udnZqGRBRiwaNagtyliWZGrKNGvaXMIO0VyaY4jWUwSzUgPrLu1A==", "requires": { - "@babel/runtime": "^7.13.9", - "@polkadot/wasm-crypto-asmjs": "^4.0.2", - "@polkadot/wasm-crypto-wasm": "^4.0.2" + "@babel/runtime": "^7.17.9", + "@polkadot/wasm-bridge": "6.1.1", + "@polkadot/wasm-crypto-asmjs": "6.1.1", + "@polkadot/wasm-crypto-init": "6.1.1", + "@polkadot/wasm-crypto-wasm": "6.1.1", + "@polkadot/wasm-util": "6.1.1" } }, "@polkadot/wasm-crypto-asmjs": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-4.0.2.tgz", - "integrity": "sha512-hlebqtGvfjg2ZNm4scwBGVHwOwfUhy2yw5RBHmPwkccUif3sIy4SAzstpcVBIVMdAEvo746bPWEInA8zJRcgJA==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-6.1.1.tgz", + "integrity": "sha512-gG4FStVumkyRNH7WcTB+hn3EEwCssJhQyi4B1BOUt+eYYmw9xJdzIhqjzSd9b/yF2e5sRaAzfnMj2srGufsE6A==", + "requires": { + "@babel/runtime": "^7.17.9" + } + }, + "@polkadot/wasm-crypto-init": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-init/-/wasm-crypto-init-6.1.1.tgz", + "integrity": "sha512-rbBm/9FOOUjISL4gGNokjcKy2X+Af6Chaet4zlabatpImtPIAK26B2UUBGoaRUnvl/w6K3+GwBL4LuBC+CvzFw==", "requires": { - "@babel/runtime": "^7.13.9" + "@babel/runtime": "^7.17.9", + "@polkadot/wasm-bridge": "6.1.1", + "@polkadot/wasm-crypto-asmjs": "6.1.1", + "@polkadot/wasm-crypto-wasm": "6.1.1" } }, "@polkadot/wasm-crypto-wasm": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-4.0.2.tgz", - "integrity": "sha512-de/AfNPZ0uDKFWzOZ1rJCtaUbakGN29ks6IRYu6HZTRg7+RtqvE1rIkxabBvYgQVHIesmNwvEA9DlIkS6hYRFQ==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-6.1.1.tgz", + "integrity": "sha512-zkz5Ct4KfTBT+YNEA5qbsHhTV58/FAxDave8wYIOaW4TrBnFPPs+J0WBWlGFertgIhPkvjFnQC/xzRyhet9prg==", "requires": { - "@babel/runtime": "^7.13.9" + "@babel/runtime": "^7.17.9", + "@polkadot/wasm-util": "6.1.1" } }, - "@polkadot/x-fetch": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-fetch/-/x-fetch-6.11.1.tgz", - "integrity": "sha512-qJyLLnm+4SQEZ002UDz2wWnXbnnH84rIS0mLKZ5k82H4lMYY+PQflvzv6sbu463e/lgiEao+6zvWS6DSKv1Yog==", + "@polkadot/wasm-util": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@polkadot/wasm-util/-/wasm-util-6.1.1.tgz", + "integrity": "sha512-DgpLoFXMT53UKcfZ8eT2GkJlJAOh89AWO+TP6a6qeZQpvXVe5f1yR45WQpkZlgZyUP+/19+kY56GK0pQxfslqg==", "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/x-global": "6.11.1", - "@types/node-fetch": "^2.5.10", - "node-fetch": "^2.6.1" + "@babel/runtime": "^7.17.9" } }, - "@polkadot/x-global": { - 
"version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-global/-/x-global-6.11.1.tgz", - "integrity": "sha512-lsBK/e4KbjfieyRmnPs7bTiGbP/6EoCZz7rqD/voNS5qsJAaXgB9LR+ilubun9gK/TDpebyxgO+J19OBiQPIRw==", + "@polkadot/x-bigint": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-bigint/-/x-bigint-9.4.1.tgz", + "integrity": "sha512-KlbXboegENoyrpjj+eXfY13vsqrXgk4620zCAUhKNH622ogdvAepHbY/DpV6w0FLEC6MwN9zd5cRuDBEXVeWiw==", "requires": { - "@babel/runtime": "^7.14.6" + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" } }, - "@polkadot/x-randomvalues": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-randomvalues/-/x-randomvalues-6.11.1.tgz", - "integrity": "sha512-2MfUfGZSOkuPt7GF5OJkPDbl4yORI64SUuKM25EGrJ22o1UyoBnPOClm9eYujLMD6BfDZRM/7bQqqoLW+NuHVw==", + "@polkadot/x-fetch": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-fetch/-/x-fetch-9.4.1.tgz", + "integrity": "sha512-CZFPZKgy09TOF5pOFRVVhGrAaAPdSMyrUSKwdO2I8DzdIE1tmjnol50dlnZja5t8zTD0n1uIY1H4CEWwc5NF/g==", + "requires": { + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1", + "@types/node-fetch": "^2.6.1", + "node-fetch": "^2.6.7" + } + }, + "@polkadot/x-global": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-global/-/x-global-9.4.1.tgz", + "integrity": "sha512-eN4oZeRdIKQeUPNN7OtH5XeYp349d8V9+gW6W0BmCfB2lTg8TDlG1Nj+Cyxpjl9DNF5CiKudTq72zr0dDSRbwA==", "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/x-global": "6.11.1" + "@babel/runtime": "^7.18.3" } }, - "@polkadot/x-rxjs": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-rxjs/-/x-rxjs-6.11.1.tgz", - "integrity": "sha512-zIciEmij7SUuXXg9g/683Irx6GogxivrQS2pgBir2DI/YZq+um52+Dqg1mqsEZt74N4KMTMnzAZAP6LJOBOMww==", + "@polkadot/x-randomvalues": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-randomvalues/-/x-randomvalues-9.4.1.tgz", + "integrity": "sha512-TLOQw3JNPgCrcq9WO2ipdeG8scsSreu3m9hwj3n7nX/QKlVzSf4G5bxJo5TW1dwcUdHwBuVox+3zgCmo+NPh+Q==", "requires": { - "@babel/runtime": "^7.14.6", - "rxjs": "^6.6.7" + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" } }, "@polkadot/x-textdecoder": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-textdecoder/-/x-textdecoder-6.11.1.tgz", - "integrity": "sha512-DI1Ym2lyDSS/UhnTT2e9WutukevFZ0WGpzj4eotuG2BTHN3e21uYtYTt24SlyRNMrWJf5+TkZItmZeqs1nwAfQ==", + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-textdecoder/-/x-textdecoder-9.4.1.tgz", + "integrity": "sha512-yLulcgVASFUBJqrvS6Ssy0ko9teAfbu1ajH0r3Jjnqkpmmz2DJ1CS7tAktVa7THd4GHPGeKAVfxl+BbV/LZl+w==", "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/x-global": "6.11.1" + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" } }, "@polkadot/x-textencoder": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/@polkadot/x-textencoder/-/x-textencoder-6.11.1.tgz", - "integrity": "sha512-8ipjWdEuqFo+R4Nxsc3/WW9CSEiprX4XU91a37ZyRVC4e9R1bmvClrpXmRQLVcAQyhRvG8DKOOtWbz8xM+oXKg==", + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-textencoder/-/x-textencoder-9.4.1.tgz", + "integrity": "sha512-/47wa31jBa43ULqMO60vzcJigTG+ZAGNcyT5r6hFLrQzRzc8nIBjIOD8YWtnKM92r9NvlNv2wJhdamqyU0mntg==", "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/x-global": "6.11.1" + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1" } }, "@polkadot/x-ws": { - "version": "6.11.1", - 
"resolved": "https://registry.npmjs.org/@polkadot/x-ws/-/x-ws-6.11.1.tgz", - "integrity": "sha512-GNu4ywrMlVi0QF6QSpKwYWMK6JRK+kadgN/zEhMoH1z5h8LwpqDLv128j5WspWbQti2teCQtridjf7t2Lzoe8Q==", + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/@polkadot/x-ws/-/x-ws-9.4.1.tgz", + "integrity": "sha512-zQjVxXgHsBVn27u4bjY01cFO6XWxgv2b3MMOpNHTKTAs8SLEmFf0LcT7fBShimyyudyTeJld5pHApJ4qp1OXxA==", "requires": { - "@babel/runtime": "^7.14.6", - "@polkadot/x-global": "6.11.1", - "@types/websocket": "^1.0.3", + "@babel/runtime": "^7.18.3", + "@polkadot/x-global": "9.4.1", + "@types/websocket": "^1.0.5", "websocket": "^1.0.34" } }, + "@scure/base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.0.0.tgz", + "integrity": "sha512-gIVaYhUsy+9s58m/ETjSJVKHhKTBMmcRb9cEV5/5dwvfDlfORjKrFsDeDHWRrm6RjcPvCLZFwGJjAjLj1gg4HA==" + }, + "@substrate/connect": { + "version": "0.7.5", + "resolved": "https://registry.npmjs.org/@substrate/connect/-/connect-0.7.5.tgz", + "integrity": "sha512-sdAZ6IGuTNxRGlH/O+6IaXvkYzZFwMK03VbQMgxUzry9dz1+JzyaNf8iOTVHxhMIUZc0h0E90JQz/hNiUYPlUw==", + "requires": { + "@substrate/connect-extension-protocol": "^1.0.0", + "@substrate/smoldot-light": "0.6.16", + "eventemitter3": "^4.0.7" + } + }, + "@substrate/connect-extension-protocol": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@substrate/connect-extension-protocol/-/connect-extension-protocol-1.0.0.tgz", + "integrity": "sha512-nFVuKdp71hMd/MGlllAOh+a2hAqt8m6J2G0aSsS/RcALZexxF9jodbFc62ni8RDtJboeOfXAHhenYOANvJKPIg==" + }, + "@substrate/smoldot-light": { + "version": "0.6.16", + "resolved": "https://registry.npmjs.org/@substrate/smoldot-light/-/smoldot-light-0.6.16.tgz", + "integrity": "sha512-Ej0ZdNPTW0EXbp45gv/5Kt/JV+c9cmRZRYAXg+EALxXPm0hW9h2QdVLm61A2PAskOGptW4wnJ1WzzruaenwAXQ==", + "requires": { + "buffer": "^6.0.1", + "pako": "^2.0.4", + "websocket": "^1.0.32" + } + }, + "@substrate/ss58-registry": { + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/@substrate/ss58-registry/-/ss58-registry-1.22.0.tgz", + "integrity": "sha512-IKqrPY0B3AeIXEc5/JGgEhPZLy+SmVyQf+k0SIGcNSTqt1GLI3gQFEOFwSScJdem+iYZQUrn6YPPxC3TpdSC3A==" + }, "@types/bn.js": { - "version": "4.11.6", - "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz", - "integrity": "sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.0.tgz", + "integrity": "sha512-QSSVYj7pYFN49kW77o2s9xTCwZ8F2xLbjLLSEVh8D2F4JUhZtPAGOFLTD+ffqksBx/u4cE/KImFjyhqCjn/LIA==", "requires": { "@types/node": "*" } }, "@types/node": { - "version": "16.0.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.0.0.tgz", - "integrity": "sha512-TmCW5HoZ2o2/z2EYi109jLqIaPIi9y/lc2LmDCWzuCi35bcaQ+OtUh6nwBiFK7SOu25FAU5+YKdqFZUwtqGSdg==" + "version": "17.0.43", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.43.tgz", + "integrity": "sha512-jnUpgw8fL9kP2iszfIDyBQtw5Mf4/XSqy0Loc1J9pI14ejL83XcCEvSf50Gs/4ET0I9VCCDoOfufQysj0S66xA==" }, "@types/node-fetch": { - "version": "2.5.10", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.5.10.tgz", - "integrity": "sha512-IpkX0AasN44hgEad0gEF/V6EgR5n69VEqPEgnmoM8GsIGro3PowbWs4tR6IhxUTyPLpOn+fiGG6nrQhcmoCuIQ==", + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.1.tgz", + "integrity": 
"sha512-oMqjURCaxoSIsHSr1E47QHzbmzNR5rK8McHuNb11BOM9cHcIK3Avy0s/b2JlXHoQGTYS3NsvWzV1M0iK7l0wbA==", "requires": { "@types/node": "*", "form-data": "^3.0.0" } }, "@types/websocket": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@types/websocket/-/websocket-1.0.3.tgz", - "integrity": "sha512-ZdoTSwmDsKR7l1I8fpfQtmTI/hUwlOvE3q0iyJsp4tXU0MkdrYowimDzwxjhQvxU4qjhHLd3a6ig0OXRbLgIdw==", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/websocket/-/websocket-1.0.5.tgz", + "integrity": "sha512-NbsqiNX9CnEfC1Z0Vf4mE1SgAJ07JnRYcNex7AJ9zAVzmiGHmjKFEk7O4TJIsgv2B1sLEb6owKFZrACwdYngsQ==", "requires": { "@types/node": "*" } @@ -344,7 +2607,7 @@ "asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, "balanced-match": { "version": "1.0.2", @@ -352,14 +2615,6 @@ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, - "base-x": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/base-x/-/base-x-3.0.8.tgz", - "integrity": "sha512-Rl/1AWP4J/zRrk54hhlxH4drNxPJXYUaKffODVI53/dAsV4t9fBxyxYKAVPU1XBHxYwOWP9h9H0hM2MVw4YfJA==", - "requires": { - "safe-buffer": "^5.0.1" - } - }, "base64-js": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", @@ -371,15 +2626,10 @@ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", "dev": true }, - "blakejs": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/blakejs/-/blakejs-1.1.1.tgz", - "integrity": "sha512-bLG6PHOCZJKNshTjGRBvET0vTciwQE6zFKOKKXPDJfwFBd4Ac0yBfPZqcGvGJap50l7ktvlpFqc2jGVaUgbJgg==" - }, "bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", + "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==" }, "brace-expansion": { "version": "1.1.11", @@ -400,29 +2650,28 @@ "fill-range": "^7.0.1" } }, - "brorand": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" - }, "browser-stdout": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", "dev": true }, - "bufferutil": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/bufferutil/-/bufferutil-4.0.3.tgz", - "integrity": "sha512-yEYTwGndELGvfXsImMBLop58eaGW+YdONi1fNjTINSY98tmMmFijBG6WXgdkfuLNt4imzQNtIE+eBp1PVpMCSw==", + "buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", "requires": { - "node-gyp-build": "^4.2.0" + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" } }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": 
"sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" + "bufferutil": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/bufferutil/-/bufferutil-4.0.6.tgz", + "integrity": "sha512-jduaYOYtnio4aIAyc6UbvPCVcgq7nYpVnucyxr6eCYg/Woad9Hf/oxxBRDnGGjPfjUm6j5O/uBWhIu4iLebFaw==", + "requires": { + "node-gyp-build": "^4.3.0" + } }, "chai": { "version": "4.3.4", @@ -481,15 +2730,6 @@ "readdirp": "~3.5.0" } }, - "cipher-base": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, "cliui": { "version": "7.0.4", "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", @@ -564,23 +2804,6 @@ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", "dev": true }, - "create-hash": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "requires": { - "cipher-base": "^1.0.1", - "inherits": "^2.0.1", - "md5.js": "^1.3.4", - "ripemd160": "^2.0.1", - "sha.js": "^2.4.0" - } - }, - "cuint": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/cuint/-/cuint-0.2.2.tgz", - "integrity": "sha1-QICG1AlVDCYxFVYZ6fp7ytw7mRs=" - }, "d": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", @@ -591,11 +2814,11 @@ } }, "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "requires": { - "ms": "2.0.0" + "ms": "2.1.2" } }, "decamelize": { @@ -616,7 +2839,7 @@ "delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" }, "diff": { "version": "5.0.0", @@ -624,18 +2847,12 @@ "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", "dev": true }, - "elliptic": { - "version": "6.5.4", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz", - "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==", + "ed2curve": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/ed2curve/-/ed2curve-0.3.0.tgz", + "integrity": "sha512-8w2fmmq3hv9rCrcI7g9hms2pMunQr1JINfcjwR9tAyZqhtyaMN991lF/ZfHfr5tzZQ8c7y7aBgZbjfbd0fjFwQ==", "requires": { - "bn.js": "^4.11.9", - "brorand": "^1.1.0", - "hash.js": "^1.0.0", - "hmac-drbg": "^1.0.1", - "inherits": "^2.0.4", - "minimalistic-assert": "^1.0.1", - "minimalistic-crypto-utils": "^1.0.1" + "tweetnacl": "1.x.x" } }, "emoji-regex": { @@ -645,19 +2862,19 @@ "dev": true }, "es5-ext": { - "version": "0.10.53", - "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", - "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", + 
"version": "0.10.61", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.61.tgz", + "integrity": "sha512-yFhIqQAzu2Ca2I4SE2Au3rxVfmohU9Y7wqGR+s7+H7krk26NXhIRAZDgqd6xqjCEFUomDEA3/Bo/7fKmIkW1kA==", "requires": { - "es6-iterator": "~2.0.3", - "es6-symbol": "~3.1.3", - "next-tick": "~1.0.0" + "es6-iterator": "^2.0.3", + "es6-symbol": "^3.1.3", + "next-tick": "^1.1.0" } }, "es6-iterator": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", - "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", + "integrity": "sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==", "requires": { "d": "1", "es5-ext": "^0.10.35", @@ -691,17 +2908,17 @@ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" }, "ext": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", - "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/ext/-/ext-1.6.0.tgz", + "integrity": "sha512-sdBImtzkq2HpkdRLtlLWDa6w4DX22ijZLKx8BMPUuKe1c5lbN6xwQDQCxSfxBQnHZ13ls/FH0MQZx/q/gr6FQg==", "requires": { - "type": "^2.0.0" + "type": "^2.5.0" }, "dependencies": { "type": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/type/-/type-2.5.0.tgz", - "integrity": "sha512-180WMDQaIMm3+7hGXWf12GtdniDEy7nYcyFMKJn/eZz/6tSLXrUN9V0wKSbMjej0I1WHWbpREDEKHtqPQa9NNw==" + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/type/-/type-2.6.0.tgz", + "integrity": "sha512-eiDBDOmkih5pMbo9OqsqPRGMljLodLcwd5XD5JbtNB0o89xZAwynY9EdCDsJU7LtcVCClu9DvM7/0Ep1hYX3EQ==" } } }, @@ -800,40 +3017,16 @@ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true }, - "hash-base": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "requires": { - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - } - }, - "hash.js": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "requires": { - "inherits": "^2.0.3", - "minimalistic-assert": "^1.0.1" - } - }, "he": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", "dev": true }, - "hmac-drbg": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", - "requires": { - "hash.js": "^1.0.3", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.1" - } + "ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" }, "inflight": { "version": "1.0.6", @@ -848,7 +3041,8 @@ "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + 
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true }, "ip-regex": { "version": "4.3.0", @@ -900,7 +3094,7 @@ "is-typedarray": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" }, "isexe": { "version": "2.0.0", @@ -908,11 +3102,6 @@ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", "dev": true }, - "js-sha3": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz", - "integrity": "sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==" - }, "js-yaml": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", @@ -922,6 +3111,11 @@ "argparse": "^2.0.1" } }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" + }, "locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -931,6 +3125,11 @@ "p-locate": "^5.0.0" } }, + "lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, "log-symbols": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", @@ -940,39 +3139,19 @@ "chalk": "^4.0.0" } }, - "md5.js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, "mime-db": { - "version": "1.48.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.48.0.tgz", - "integrity": "sha512-FM3QwxV+TnZYQ2aRqhlKBMHxk10lTbMt3bBkMAp54ddrNeVSfcQYOOKuGuy3Ddrm38I04If834fOUSq1yzslJQ==" + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" }, "mime-types": { - "version": "2.1.31", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.31.tgz", - "integrity": "sha512-XGZnNzm3QvgKxa8dpzyhFTHmpP3l5YNusmne07VUOXxou9CqUqYa/HBy124RqtVh/O2pECas/MOcsDgpilPOPg==", + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "requires": { - "mime-db": "1.48.0" + "mime-db": "1.52.0" } }, - "minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" - }, - "minimalistic-crypto-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" - }, "minimatch": { 
"version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", @@ -1040,10 +3219,15 @@ } } }, + "mock-socket": { + "version": "9.1.5", + "resolved": "https://registry.npmjs.org/mock-socket/-/mock-socket-9.1.5.tgz", + "integrity": "sha512-3DeNIcsQixWHHKk6NdoBhWI4t1VMj5/HzfnI1rE/pLl5qKx7+gd4DNA07ehTaZ6MoUU053si6Hd+YtiM/tQZfg==" + }, "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "nanoid": { "version": "3.1.20", @@ -1052,9 +3236,20 @@ "dev": true }, "next-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz", + "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==" + }, + "nock": { + "version": "13.2.7", + "resolved": "https://registry.npmjs.org/nock/-/nock-13.2.7.tgz", + "integrity": "sha512-R6NUw7RIPtKwgK7jskuKoEi4VFMqIHtV2Uu9K/Uegc4TA5cqe+oNMYslZcUmnVNQCTG6wcSqUBaGTDd7sq5srg==", + "requires": { + "debug": "^4.1.0", + "json-stringify-safe": "^5.0.1", + "lodash": "^4.17.21", + "propagate": "^2.0.0" + } }, "node-fetch": { "version": "2.6.7", @@ -1065,9 +3260,9 @@ } }, "node-gyp-build": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", - "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==" + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.4.0.tgz", + "integrity": "sha512-amJnQCcgtRVw9SvoebO3BKGESClrfXGCUTX9hSn1OuGQTQBOZmVd0Z0OlecpuRksKvbsUqALE8jls/ErClAPuQ==" }, "normalize-path": { "version": "3.0.0", @@ -1102,6 +3297,11 @@ "p-limit": "^3.0.2" } }, + "pako": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pako/-/pako-2.0.4.tgz", + "integrity": "sha512-v8tweI900AUkZN6heMU/4Uy4cXRc2AYNRggVmTR+dEncawDJgCdLMximOVA2p4qO57WMynangsfGRb5WD6L1Bg==" + }, "path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -1126,6 +3326,11 @@ "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", "dev": true }, + "propagate": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/propagate/-/propagate-2.0.1.tgz", + "integrity": "sha512-vGrhOavPSTz4QVNuBNdcNXePNdNMaO1xj9yBeH1ScQPjk/rhg9sSlCXPhMkFuaNNW/syTvYqsnbIJxMBfRbbag==" + }, "randombytes": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", @@ -1135,16 +3340,6 @@ "safe-buffer": "^5.1.0" } }, - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - }, "readdirp": { "version": "3.5.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", @@ -1155,9 +3350,9 @@ } }, "regenerator-runtime": { - "version": "0.13.7", - "resolved": 
"https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", - "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew==" + "version": "0.13.9", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", + "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" }, "require-directory": { "version": "2.1.1", @@ -1165,32 +3360,19 @@ "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", "dev": true }, - "ripemd160": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1" - } - }, "rxjs": { - "version": "6.6.7", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz", - "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==", + "version": "7.5.5", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.5.5.tgz", + "integrity": "sha512-sy+H0pQofO95VDmFLzyaw9xNJU4KTRSwQIGM6+iG3SypAtCiLDzpeG8sJrNCWn2Up9km+KhkvTdbkrdy+yzZdw==", "requires": { - "tslib": "^1.9.0" + "tslib": "^2.1.0" } }, "safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "scryptsy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/scryptsy/-/scryptsy-2.1.0.tgz", - "integrity": "sha512-1CdSqHQowJBnMAFyPEBRfqag/YP9OF394FV+4YREIJX4ljD7OxvQRDayyoyyCk+senRjSkP6VnUNQmVQqB6g7w==" + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true }, "serialize-javascript": { "version": "5.0.1", @@ -1201,15 +3383,6 @@ "randombytes": "^2.1.0" } }, - "sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, "string-width": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", @@ -1220,14 +3393,6 @@ "strip-ansi": "^4.0.0" } }, - "string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "requires": { - "safe-buffer": "~5.2.0" - } - }, "strip-ansi": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", @@ -1264,12 +3429,12 @@ "tr46": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=" + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" }, "tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", + "integrity": 
"sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" }, "tweetnacl": { "version": "1.0.3", @@ -1296,22 +3461,17 @@ } }, "utf-8-validate": { - "version": "5.0.5", - "resolved": "https://registry.npmjs.org/utf-8-validate/-/utf-8-validate-5.0.5.tgz", - "integrity": "sha512-+pnxRYsS/axEpkrrEpzYfNZGXp0IjC/9RIxwM5gntY4Koi8SHmUGSfxfWqxZdRxrtaoVstuOzUp/rbs3JSPELQ==", + "version": "5.0.9", + "resolved": "https://registry.npmjs.org/utf-8-validate/-/utf-8-validate-5.0.9.tgz", + "integrity": "sha512-Yek7dAy0v3Kl0orwMlvi7TPtiCNrdfHNd7Gcc/pLq4BLXqfAmd0J7OWMizUQnTTJsyjKn02mU7anqwfmUP4J8Q==", "requires": { - "node-gyp-build": "^4.2.0" + "node-gyp-build": "^4.3.0" } }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, "webidl-conversions": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=" + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" }, "websocket": { "version": "1.0.34", @@ -1324,12 +3484,27 @@ "typedarray-to-buffer": "^3.1.5", "utf-8-validate": "^5.0.2", "yaeti": "^0.0.6" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + } } }, "whatwg-url": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha1-lmRU6HZUYuN2RNNib2dCzotwll0=", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", "requires": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" @@ -1410,14 +3585,6 @@ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", "dev": true }, - "xxhashjs": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/xxhashjs/-/xxhashjs-0.2.2.tgz", - "integrity": "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw==", - "requires": { - "cuint": "^0.2.2" - } - }, "y18n": { "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", @@ -1427,7 +3594,7 @@ "yaeti": { "version": "0.0.6", "resolved": "https://registry.npmjs.org/yaeti/-/yaeti-0.0.6.tgz", - "integrity": "sha1-8m9ITXJoTPQr7ft2lwqhYI+/lXc=" + "integrity": "sha512-MvQa//+KcZCUkBTIC9blM+CU9J2GzuTytsOUwf2lidtvkx/6gnEp1QvJv34t9vdjhFmha/mUiNDbN0D0mJWdug==" }, "yargs": { "version": "16.2.0", diff --git a/tests/polkadotjs_test/package.json b/tests/polkadotjs_test/package.json index 153702c629..715a6b8a67 100644 --- a/tests/polkadotjs_test/package.json +++ b/tests/polkadotjs_test/package.json @@ -9,7 +9,7 @@ "author": "", "license": "ISC", "dependencies": { - "@polkadot/api": "4.5.1" + "@polkadot/api": "8.8.2" }, "devDependencies": { "chai": "^4.2.0", diff --git a/tests/polkadotjs_test/start_polkadotjs_test.go b/tests/polkadotjs_test/start_polkadotjs_test.go index 7886a18d2c..cc8ef678fa 100644 --- a/tests/polkadotjs_test/start_polkadotjs_test.go +++ 
b/tests/polkadotjs_test/start_polkadotjs_test.go @@ -49,6 +49,7 @@ func TestStartGossamerAndPolkadotAPI(t *testing.T) { tomlConfig.Init.Genesis = libutils.GetDevGenesisSpecPathTest(t) tomlConfig.Core.BABELead = true tomlConfig.RPC.WS = true + tomlConfig.RPC.Modules = []string{"system", "author", "chain", "state", "dev", "rpc", "grandpa"} n := node.New(t, tomlConfig) ctx, cancel := context.WithCancel(context.Background()) diff --git a/tests/polkadotjs_test/test/test-polkadot.js b/tests/polkadotjs_test/test/test-polkadot.js index 3d0909209a..483d5d1244 100644 --- a/tests/polkadotjs_test/test/test-polkadot.js +++ b/tests/polkadotjs_test/test/test-polkadot.js @@ -63,7 +63,7 @@ describe('Testing polkadot.js/api calls:', function () { it('call api.libraryInfo', async function () { const libraryInfo = await api.libraryInfo; expect(libraryInfo).to.be.not.null; - expect(libraryInfo).to.be.equal('@polkadot/api v4.5.1'); + expect(libraryInfo).to.be.equal('@polkadot/api v8.8.2'); }); }); describe('upgrade runtime', () => { @@ -98,8 +98,8 @@ describe('Testing polkadot.js/api calls:', function () { }); }) }); - - describe('api query', () => { + //TODO: remove skip when rpc.state.queryStorage is fixed (in PR#2505) + describe.skip('api query', () => { it('call api.query.timestamp.now()', async function () { const timestamp = await api.query.timestamp.now(); expect(timestamp).to.be.not.undefined; @@ -180,5 +180,12 @@ describe('Testing polkadot.js/api calls:', function () { expect(transfer).to.have.lengthOf(32); }); }); - + describe('api grandpa', () => { + it('call api.rpc.grandpa.proveFinality', async function () { + const proveBlockNumber = 0; + const finality = await api.rpc.grandpa.proveFinality(proveBlockNumber); + expect(finality).to.be.not.null; + expect(finality).to.be.ownProperty('registry') + }); + }); }); From 792e53fe674b6d177e2e95ca9df3abbcbeba550f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Thu, 30 Jun 2022 09:32:03 -0400 Subject: [PATCH 25/48] fix: ensure we convert the `uint` type (#2626) --- lib/common/variadic/uint32OrHash.go | 5 +++++ lib/common/variadic/uint32OrHash_test.go | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/lib/common/variadic/uint32OrHash.go b/lib/common/variadic/uint32OrHash.go index 6bf597505c..4a4c7a4025 100644 --- a/lib/common/variadic/uint32OrHash.go +++ b/lib/common/variadic/uint32OrHash.go @@ -23,6 +23,10 @@ func NewUint32OrHash(value interface{}) (*Uint32OrHash, error) { return &Uint32OrHash{ value: uint32(v), }, nil + case uint: + return &Uint32OrHash{ + value: uint32(v), + }, nil case uint32: return &Uint32OrHash{ value: v, @@ -98,6 +102,7 @@ func (x *Uint32OrHash) IsUint32() bool { if x == nil { return false } + _, is := x.value.(uint32) return is } diff --git a/lib/common/variadic/uint32OrHash_test.go b/lib/common/variadic/uint32OrHash_test.go index 65f27880a9..f5a77d4916 100644 --- a/lib/common/variadic/uint32OrHash_test.go +++ b/lib/common/variadic/uint32OrHash_test.go @@ -25,6 +25,10 @@ func TestNewUint32OrHash(t *testing.T) { require.NoError(t, err) require.Equal(t, uint32(num), res.Value()) + res, err = NewUint32OrHash(uint(num)) + require.NoError(t, err) + require.Equal(t, uint32(num), res.Value()) + res, err = NewUint32OrHash(uint32(num)) require.NoError(t, err) require.Equal(t, uint32(num), res.Value()) From d3282f70373b90dd09b56a3e9d3a6f2d303c61bc Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Mon, 4 Jul 2022 13:52:19 -0400 Subject: [PATCH 26/48] chore(trie): refactor header encoding to prepare for upgrades 
 (#2530)

- Declare node variants as bits+bit mask instead of just bits or enums
- `encodeHeader` encodes the first header byte + extra partial key length bytes instead of the convoluted/not-modular `encodeHeader` + `encodeKeyLength`
- `decodeHeader` gets the variant + partial key length, instead of convoluted in-line header code in `Decode` + further header decoding in `decodeLeaf` and `decodeBranch`
- Find the node variant header partial key length mask dynamically from the node variant header bit mask, instead of using a constant
- Use `uint16` for `partialKeyLength` as specified in the specification
- Clarify codec documentation in `internal/trie/node/README.md`
- Fixes:
  - `decodeKey` last byte maximum value check fixed
  - `decodeKey` accepts a key of length up to `65535`
  - Allow encoding a partial key of length `65535`
- Other refactoring:
  - Panic for programming error cases
  - Revise some of the error wrapping on errors changed in this commit
  - Remove single byte sync pool (slower than a stack byte slice of length 1)
---
 internal/trie/node/README.md | 31 +++
 internal/trie/node/decode.go | 92 ++++----
 internal/trie/node/decode_test.go | 132 ++++++-----
 internal/trie/node/encode.go | 4 +-
 internal/trie/node/encode_decode_test.go | 8 +-
 internal/trie/node/encode_doc.go | 28 ---
 internal/trie/node/encode_test.go | 50 ++--
 internal/trie/node/header.go | 153 ++++++++++--
 internal/trie/node/header_test.go | 286 ++++++++++++++++++++---
 internal/trie/node/key.go | 79 +------
 internal/trie/node/key_test.go | 194 ++-------------
 internal/trie/node/node.go | 2 +
 internal/trie/node/variants.go | 26 +++
 internal/trie/pools/pools.go | 9 -
 14 files changed, 626 insertions(+), 468 deletions(-)
 create mode 100644 internal/trie/node/README.md
 delete mode 100644 internal/trie/node/encode_doc.go
 create mode 100644 internal/trie/node/variants.go

diff --git a/internal/trie/node/README.md b/internal/trie/node/README.md
new file mode 100644
index 0000000000..cae00b0ff0
--- /dev/null
+++ b/internal/trie/node/README.md
@@ -0,0 +1,31 @@
+# Trie node
+
+Package node defines the `Node` structure with methods to be used in the modified Merkle-Patricia Radix-16 trie.
+
+## Codec
+
+The following sub-sections specify the encoding of a node.
+This encoding is formally described in [the Polkadot specification](https://spec.polkadot.network/#sect-state-storage).
+
+### Header
+
+Each node encoding has a header of one or more bytes.
+The first byte contains the node variant and some or all of the partial key length of the node.
+If the partial key length cannot fit in the first byte, additional bytes are added to the header to represent the total partial key length.
+
+### Partial key
+
+The header is then concatenated with the partial key of the node, encoded as Little Endian bytes.
+
+### Remaining bytes
+
+The remaining bytes appended depend on the node variant.
+
+- For leaves, the SCALE-encoded leaf value is appended.
+- For branches, the following elements are concatenated in this order and appended to the previous header+partial key:
+  - Children bitmap (2 bytes)
+  - SCALE-encoded node value
+  - Hash(Encoding(Child[0]))
+  - Hash(Encoding(Child[1]))
+  - ...
+ - Hash(Encoding(Child[15])) diff --git a/internal/trie/node/decode.go b/internal/trie/node/decode.go index cb6930bbee..2dac9d3eeb 100644 --- a/internal/trie/node/decode.go +++ b/internal/trie/node/decode.go @@ -9,63 +9,68 @@ import ( "fmt" "io" - "github.com/ChainSafe/gossamer/internal/trie/pools" "github.com/ChainSafe/gossamer/pkg/scale" ) var ( - ErrReadHeaderByte = errors.New("cannot read header byte") - ErrUnknownNodeType = errors.New("unknown node type") + // ErrDecodeValue is defined since no sentinel error is defined + // in the scale package. + // TODO remove once the following issue is done: + // https://github.com/ChainSafe/gossamer/issues/2631 . ErrDecodeValue = errors.New("cannot decode value") ErrReadChildrenBitmap = errors.New("cannot read children bitmap") - ErrDecodeChildHash = errors.New("cannot decode child hash") + // ErrDecodeChildHash is defined since no sentinel error is defined + // in the scale package. + // TODO remove once the following issue is done: + // https://github.com/ChainSafe/gossamer/issues/2631 . + ErrDecodeChildHash = errors.New("cannot decode child hash") ) // Decode decodes a node from a reader. +// The encoding format is documented in the README.md +// of this package, and specified in the Polkadot spec at +// https://spec.polkadot.network/#sect-state-storage // For branch decoding, see the comments on decodeBranch. // For leaf decoding, see the comments on decodeLeaf. func Decode(reader io.Reader) (n *Node, err error) { - buffer := pools.SingleByteBuffers.Get().(*bytes.Buffer) - defer pools.SingleByteBuffers.Put(buffer) - oneByteBuf := buffer.Bytes() - _, err = reader.Read(oneByteBuf) + variant, partialKeyLength, err := decodeHeader(reader) if err != nil { - return nil, fmt.Errorf("%w: %s", ErrReadHeaderByte, err) + return nil, fmt.Errorf("decoding header: %w", err) } - header := oneByteBuf[0] - nodeTypeHeaderByte := header >> 6 - switch nodeTypeHeaderByte { - case leafHeader: - n, err = decodeLeaf(reader, header) + switch variant { + case leafVariant.bits: + n, err = decodeLeaf(reader, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode leaf: %w", err) } return n, nil - case branchHeader, branchWithValueHeader: - n, err = decodeBranch(reader, header) + case branchVariant.bits, branchWithValueVariant.bits: + n, err = decodeBranch(reader, variant, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode branch: %w", err) } return n, nil default: - return nil, fmt.Errorf("%w: %d", ErrUnknownNodeType, nodeTypeHeaderByte) + // this is a programming error, an unknown node variant + // should be caught by decodeHeader. + panic(fmt.Sprintf("not implemented for node variant %08b", variant)) } } -// decodeBranch reads and decodes from a reader with the encoding specified in internal/trie/node/encode_doc.go. +// decodeBranch reads from a reader and decodes to a node branch. // Note that since the encoded branch stores the hash of the children nodes, we are not // reconstructing the child nodes from the encoding. This function instead stubs where the // children are known to be with an empty leaf. The children nodes hashes are then used to // find other values using the persistent database. 
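For illustration, a minimal self-contained Go sketch of the header scheme the new README.md above describes: two high bits carry the variant, the remaining six bits carry the partial key length, and extra length bytes follow while the value saturates. This is not the code from this patch (which derives the length mask from the variant mask and guards overflow differently); the names encodeHeaderSketch and decodeHeaderSketch, the error messages and the example values are illustrative only.

package main

import (
	"bytes"
	"fmt"
	"io"
)

const (
	leafVariantBits = 0b0100_0000 // leaf variant (01)
	variantBitMask  = 0b1100_0000 // two high bits hold the variant
	keyLengthMask   = 0b0011_1111 // remaining six bits hold (part of) the key length
)

// encodeHeaderSketch writes the variant bits and the partial key length,
// spilling the length into extra bytes when it does not fit the first byte.
func encodeHeaderSketch(variantBits byte, partialKeyLength uint16, writer io.Writer) error {
	if partialKeyLength < keyLengthMask {
		_, err := writer.Write([]byte{variantBits | byte(partialKeyLength)})
		return err
	}

	header := []byte{variantBits | keyLengthMask} // first byte saturated
	remaining := partialKeyLength - keyLengthMask
	for remaining >= 255 {
		header = append(header, 255)
		remaining -= 255
	}
	header = append(header, byte(remaining))
	_, err := writer.Write(header)
	return err
}

// decodeHeaderSketch reads the variant bits back and reassembles the partial
// key length, reading extra bytes while the previous byte was saturated.
func decodeHeaderSketch(reader io.Reader) (variantBits byte, partialKeyLength uint16, err error) {
	buffer := make([]byte, 1)
	if _, err = io.ReadFull(reader, buffer); err != nil {
		return 0, 0, fmt.Errorf("reading header byte: %w", err)
	}

	variantBits = buffer[0] & variantBitMask
	partialKeyLength = uint16(buffer[0] & keyLengthMask)
	if partialKeyLength == keyLengthMask {
		for {
			if _, err = io.ReadFull(reader, buffer); err != nil {
				return 0, 0, fmt.Errorf("reading key length byte: %w", err)
			}
			// Real code must also guard against uint16 overflow here.
			partialKeyLength += uint16(buffer[0])
			if buffer[0] < 255 {
				break
			}
		}
	}
	return variantBits, partialKeyLength, nil
}

func main() {
	buffer := bytes.NewBuffer(nil)
	if err := encodeHeaderSketch(leafVariantBits, 70, buffer); err != nil {
		panic(err)
	}
	fmt.Printf("encoded header: %08b\n", buffer.Bytes()) // [01111111 00000111]
	variant, length, err := decodeHeaderSketch(buffer)
	if err != nil {
		panic(err)
	}
	fmt.Printf("variant %08b, partial key length %d\n", variant, length) // 01000000, 70
}

A leaf with a 70-nibble partial key thus starts with bytes 0b0111_1111 0b0000_0111: the first byte saturates at 63 and the second byte carries the remaining 7, matching the saturation rule the README describes.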
-func decodeBranch(reader io.Reader, header byte) (node *Node, err error) { +func decodeBranch(reader io.Reader, variant byte, partialKeyLength uint16) ( + node *Node, err error) { node = &Node{ Dirty: true, Children: make([]*Node, ChildrenCapacity), } - keyLen := header & keyLenOffset - node.Key, err = decodeKey(reader, keyLen) + node.Key, err = decodeKey(reader, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode key: %w", err) } @@ -78,18 +83,14 @@ func decodeBranch(reader io.Reader, header byte) (node *Node, err error) { sd := scale.NewDecoder(reader) - nodeType := header >> 6 - if nodeType == branchWithValueHeader { - var value []byte - // branch w/ value - err := sd.Decode(&value) + if variant == branchWithValueVariant.bits { + err := sd.Decode(&node.Value) if err != nil { return nil, fmt.Errorf("%w: %s", ErrDecodeValue, err) } - node.Value = value } - for i := 0; i < 16; i++ { + for i := 0; i < ChildrenCapacity; i++ { if (childrenBitmap[i/8]>>(i%8))&1 != 1 { continue } @@ -101,37 +102,38 @@ func decodeBranch(reader io.Reader, header byte) (node *Node, err error) { ErrDecodeChildHash, i, err) } - // Handle inlined leaf nodes. const hashLength = 32 - nodeTypeHeaderByte := hash[0] >> 6 - if nodeTypeHeaderByte == leafHeader && len(hash) < hashLength { - leaf, err := decodeLeaf(bytes.NewReader(hash[1:]), hash[0]) - if err != nil { - return nil, fmt.Errorf("%w: at index %d: %s", - ErrDecodeValue, i, err) + childNode := &Node{ + HashDigest: hash, + Dirty: true, + } + if len(hash) < hashLength { + // Handle inlined nodes + reader = bytes.NewReader(hash) + variant, partialKeyLength, err := decodeHeader(reader) + if err == nil && variant == leafVariant.bits { + childNode, err = decodeLeaf(reader, partialKeyLength) + if err != nil { + return nil, fmt.Errorf("%w: at index %d: %s", + ErrDecodeValue, i, err) + } } - node.Descendants++ - node.Children[i] = leaf - continue } node.Descendants++ - node.Children[i] = &Node{ - HashDigest: hash, - } + node.Children[i] = childNode } return node, nil } -// decodeLeaf reads and decodes from a reader with the encoding specified in lib/trie/node/encode_doc.go. -func decodeLeaf(reader io.Reader, header byte) (node *Node, err error) { +// decodeLeaf reads from a reader and decodes to a leaf node. 
+func decodeLeaf(reader io.Reader, partialKeyLength uint16) (node *Node, err error) { node = &Node{ Dirty: true, } - keyLen := header & keyLenOffset - node.Key, err = decodeKey(reader, keyLen) + node.Key, err = decodeKey(reader, partialKeyLength) if err != nil { return nil, fmt.Errorf("cannot decode key: %w", err) } diff --git a/internal/trie/node/decode_test.go b/internal/trie/node/decode_test.go index 6a0a916b81..2e8e0967e2 100644 --- a/internal/trie/node/decode_test.go +++ b/internal/trie/node/decode_test.go @@ -42,28 +42,29 @@ func Test_Decode(t *testing.T) { }{ "no data": { reader: bytes.NewReader(nil), - errWrapped: ErrReadHeaderByte, - errMessage: "cannot read header byte: EOF", + errWrapped: io.EOF, + errMessage: "decoding header: reading header byte: EOF", }, - "unknown node type": { + "unknown node variant": { reader: bytes.NewReader([]byte{0}), - errWrapped: ErrUnknownNodeType, - errMessage: "unknown node type: 0", + errWrapped: ErrVariantUnknown, + errMessage: "decoding header: decoding header byte: node variant is unknown: for header byte 00000000", }, "leaf decoding error": { reader: bytes.NewReader([]byte{ - 65, // node type 1 (leaf) and key length 1 + leafVariant.bits | 1, // key length 1 // missing key data byte }), - errWrapped: ErrReadKeyData, - errMessage: "cannot decode leaf: cannot decode key: cannot read key data: EOF", + errWrapped: io.EOF, + errMessage: "cannot decode leaf: cannot decode key: " + + "reading from reader: EOF", }, "leaf success": { reader: bytes.NewReader( append( []byte{ - 65, // node type 1 (leaf) and key length 1 - 9, // key data + leafVariant.bits | 1, // key length 1 + 9, // key data }, scaleEncodeBytes(t, 1, 2, 3)..., ), @@ -76,18 +77,19 @@ func Test_Decode(t *testing.T) { }, "branch decoding error": { reader: bytes.NewReader([]byte{ - 129, // node type 2 (branch without value) and key length 1 + branchVariant.bits | 1, // key length 1 // missing key data byte }), - errWrapped: ErrReadKeyData, - errMessage: "cannot decode branch: cannot decode key: cannot read key data: EOF", + errWrapped: io.EOF, + errMessage: "cannot decode branch: cannot decode key: " + + "reading from reader: EOF", }, "branch success": { reader: bytes.NewReader( []byte{ - 129, // node type 2 (branch without value) and key length 1 - 9, // key data - 0, 0, // no children bitmap + branchVariant.bits | 1, // key length 1 + 9, // key data + 0, 0, // no children bitmap }, ), n: &Node{ @@ -99,7 +101,7 @@ func Test_Decode(t *testing.T) { "branch with two inlined children": { reader: bytes.NewReader( []byte{ - 158, // node type 2 (branch w/o value) and key length 30 + branchVariant.bits | 30, // key length 30 // Key data start 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, @@ -178,28 +180,31 @@ func Test_decodeBranch(t *testing.T) { t.Parallel() testCases := map[string]struct { - reader io.Reader - header byte - branch *Node - errWrapped error - errMessage string + reader io.Reader + variant byte + partialKeyLength uint16 + branch *Node + errWrapped error + errMessage string }{ "key decoding error": { reader: bytes.NewBuffer([]byte{ // missing key data byte }), - header: 129, // node type 2 (branch without value) and key length 1 - errWrapped: ErrReadKeyData, - errMessage: "cannot decode key: cannot read key data: EOF", + variant: branchVariant.bits, + partialKeyLength: 1, + errWrapped: io.EOF, + errMessage: "cannot decode key: reading from reader: EOF", }, "children bitmap read error": { reader: bytes.NewBuffer([]byte{ 9, // key data // missing children bitmap 2 bytes 
}), - header: 129, // node type 2 (branch without value) and key length 1 - errWrapped: ErrReadChildrenBitmap, - errMessage: "cannot read children bitmap: EOF", + variant: branchVariant.bits, + partialKeyLength: 1, + errWrapped: ErrReadChildrenBitmap, + errMessage: "cannot read children bitmap: EOF", }, "children decoding error": { reader: bytes.NewBuffer([]byte{ @@ -207,21 +212,21 @@ func Test_decodeBranch(t *testing.T) { 0, 4, // children bitmap // missing children scale encoded data }), - header: 129, // node type 2 (branch without value) and key length 1 - errWrapped: ErrDecodeChildHash, - errMessage: "cannot decode child hash: at index 10: EOF", + variant: branchVariant.bits, + partialKeyLength: 1, + errWrapped: ErrDecodeChildHash, + errMessage: "cannot decode child hash: at index 10: EOF", }, - "success node type 2": { + "success for branch variant": { reader: bytes.NewBuffer( concatByteSlices([][]byte{ - { - 9, // key data - 0, 4, // children bitmap - }, + {9}, // key data + {0, 4}, // children bitmap scaleEncodeBytes(t, 1, 2, 3, 4, 5), // child hash }), ), - header: 129, // node type 2 (branch without value) and key length 1 + variant: branchVariant.bits, + partialKeyLength: 1, branch: &Node{ Key: []byte{9}, Children: padRightChildren([]*Node{ @@ -229,13 +234,14 @@ func Test_decodeBranch(t *testing.T) { nil, nil, nil, nil, nil, { HashDigest: []byte{1, 2, 3, 4, 5}, + Dirty: true, }, }), Dirty: true, Descendants: 1, }, }, - "value decoding error for node type 3": { + "value decoding error for branch with value variant": { reader: bytes.NewBuffer( concatByteSlices([][]byte{ {9}, // key data @@ -243,11 +249,12 @@ func Test_decodeBranch(t *testing.T) { // missing encoded branch value }), ), - header: 193, // node type 3 (branch with value) and key length 1 - errWrapped: ErrDecodeValue, - errMessage: "cannot decode value: EOF", + variant: branchWithValueVariant.bits, + partialKeyLength: 1, + errWrapped: ErrDecodeValue, + errMessage: "cannot decode value: EOF", }, - "success node type 3": { + "success for branch with value": { reader: bytes.NewBuffer( concatByteSlices([][]byte{ {9}, // key data @@ -256,7 +263,8 @@ func Test_decodeBranch(t *testing.T) { scaleEncodeBytes(t, 1, 2, 3, 4, 5), // child hash }), ), - header: 193, // node type 3 (branch with value) and key length 1 + variant: branchWithValueVariant.bits, + partialKeyLength: 1, branch: &Node{ Key: []byte{9}, Value: []byte{7, 8, 9}, @@ -265,6 +273,7 @@ func Test_decodeBranch(t *testing.T) { nil, nil, nil, nil, nil, { HashDigest: []byte{1, 2, 3, 4, 5}, + Dirty: true, }, }), Dirty: true, @@ -278,7 +287,8 @@ func Test_decodeBranch(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - branch, err := decodeBranch(testCase.reader, testCase.header) + branch, err := decodeBranch(testCase.reader, + testCase.variant, testCase.partialKeyLength) assert.ErrorIs(t, err, testCase.errWrapped) if err != nil { @@ -293,35 +303,39 @@ func Test_decodeLeaf(t *testing.T) { t.Parallel() testCases := map[string]struct { - reader io.Reader - header byte - leaf *Node - errWrapped error - errMessage string + reader io.Reader + variant byte + partialKeyLength uint16 + leaf *Node + errWrapped error + errMessage string }{ "key decoding error": { reader: bytes.NewBuffer([]byte{ // missing key data byte }), - header: 65, // node type 1 (leaf) and key length 1 - errWrapped: ErrReadKeyData, - errMessage: "cannot decode key: cannot read key data: EOF", + variant: leafVariant.bits, + partialKeyLength: 1, + errWrapped: io.EOF, + errMessage: "cannot decode 
key: reading from reader: EOF", }, "value decoding error": { reader: bytes.NewBuffer([]byte{ 9, // key data 255, 255, // bad value data }), - header: 65, // node type 1 (leaf) and key length 1 - errWrapped: ErrDecodeValue, - errMessage: "cannot decode value: could not decode invalid integer", + variant: leafVariant.bits, + partialKeyLength: 1, + errWrapped: ErrDecodeValue, + errMessage: "cannot decode value: could not decode invalid integer", }, "zero value": { reader: bytes.NewBuffer([]byte{ 9, // key data // missing value data }), - header: 65, // node type 1 (leaf) and key length 1 + variant: leafVariant.bits, + partialKeyLength: 1, leaf: &Node{ Key: []byte{9}, Dirty: true, @@ -334,7 +348,8 @@ func Test_decodeLeaf(t *testing.T) { scaleEncodeBytes(t, 1, 2, 3, 4, 5), // value data }), ), - header: 65, // node type 1 (leaf) and key length 1 + variant: leafVariant.bits, + partialKeyLength: 1, leaf: &Node{ Key: []byte{9}, Value: []byte{1, 2, 3, 4, 5}, @@ -348,7 +363,8 @@ func Test_decodeLeaf(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - leaf, err := decodeLeaf(testCase.reader, testCase.header) + leaf, err := decodeLeaf(testCase.reader, + testCase.partialKeyLength) assert.ErrorIs(t, err, testCase.errWrapped) if err != nil { diff --git a/internal/trie/node/encode.go b/internal/trie/node/encode.go index c7890e16a8..5bea739c0c 100644 --- a/internal/trie/node/encode.go +++ b/internal/trie/node/encode.go @@ -12,7 +12,9 @@ import ( ) // Encode encodes the node to the buffer given. -// The encoding format is documented in encode_doc.go. +// The encoding format is documented in the README.md +// of this package, and specified in the Polkadot spec at +// https://spec.polkadot.network/#sect-state-storage func (n *Node) Encode(buffer Buffer) (err error) { if !n.Dirty && n.Encoding != nil { _, err = buffer.Write(n.Encoding) diff --git a/internal/trie/node/encode_decode_test.go b/internal/trie/node/encode_decode_test.go index 8c6757b4ef..c92a1a2751 100644 --- a/internal/trie/node/encode_decode_test.go +++ b/internal/trie/node/encode_decode_test.go @@ -93,6 +93,7 @@ func Test_Branch_Encode_Decode(t *testing.T) { 14, 15, 16, 17, 10, 11, 12, 13, }, + Dirty: true, }, }), }, @@ -109,6 +110,7 @@ func Test_Branch_Encode_Decode(t *testing.T) { 21, 186, 226, 204, 145, 132, 5, 39, 204, }, + Dirty: true, }, }), Dirty: true, @@ -127,12 +129,10 @@ func Test_Branch_Encode_Decode(t *testing.T) { err := testCase.branchToEncode.Encode(buffer) require.NoError(t, err) - oneBuffer := make([]byte, 1) - _, err = buffer.Read(oneBuffer) + variant, partialKeyLength, err := decodeHeader(buffer) require.NoError(t, err) - header := oneBuffer[0] - resultBranch, err := decodeBranch(buffer, header) + resultBranch, err := decodeBranch(buffer, variant, partialKeyLength) require.NoError(t, err) assert.Equal(t, testCase.branchDecoded, resultBranch) diff --git a/internal/trie/node/encode_doc.go b/internal/trie/node/encode_doc.go deleted file mode 100644 index 1a8b6a1c0a..0000000000 --- a/internal/trie/node/encode_doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package node - -//nolint:lll -// Modified Merkle-Patricia Trie -// See https://github.com/w3f/polkadot-spec/blob/master/runtime-environment-spec/polkadot_re_spec.pdf for the full specification. 
-// -// Note that for the following definitions, `|` denotes concatenation -// -// Branch encoding: -// NodeHeader | Extra partial key length | Partial Key | Value -// `NodeHeader` is a byte such that: -// most significant two bits of `NodeHeader`: 10 if branch w/o value, 11 if branch w/ value -// least significant six bits of `NodeHeader`: if len(key) > 62, 0x3f, otherwise len(key) -// `Extra partial key length` is included if len(key) > 63 and consists of the remaining key length -// `Partial Key` is the branch's key -// `Value` is: Children Bitmap | SCALE Branch node Value | Hash(Enc(Child[i_1])) | Hash(Enc(Child[i_2])) | ... | Hash(Enc(Child[i_n])) -// -// Leaf encoding: -// NodeHeader | Extra partial key length | Partial Key | Value -// `NodeHeader` is a byte such that: -// most significant two bits of `NodeHeader`: 01 -// least significant six bits of `NodeHeader`: if len(key) > 62, 0x3f, otherwise len(key) -// `Extra partial key length` is included if len(key) > 63 and consists of the remaining key length -// `Partial Key` is the leaf's key -// `Value` is the leaf's SCALE encoded value diff --git a/internal/trie/node/encode_test.go b/internal/trie/node/encode_test.go index e57c13902b..ea6a4fb47e 100644 --- a/internal/trie/node/encode_test.go +++ b/internal/trie/node/encode_test.go @@ -59,15 +59,16 @@ func Test_Node_Encode(t *testing.T) { }, "leaf header encoding error": { node: &Node{ - Key: make([]byte, 63+(1<<16)), + Key: make([]byte, 1), }, writes: []writeCall{ { - written: []byte{127}, + written: []byte{leafVariant.bits | 1}, + err: errTest, }, }, - wrappedErr: ErrPartialKeyTooBig, - errMessage: "cannot encode header: partial key length cannot be larger than or equal to 2^16: 65536", + wrappedErr: errTest, + errMessage: "cannot encode header: test error", }, "leaf buffer write error for encoded key": { node: &Node{ @@ -75,10 +76,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { - written: []byte{67}, + written: []byte{leafVariant.bits | 3}, // partial key length 3 }, { - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, err: errTest, }, }, @@ -92,10 +93,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { - written: []byte{67}, + written: []byte{leafVariant.bits | 3}, // partial key length 3 }, { - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { written: []byte{12, 4, 5, 6}, @@ -112,10 +113,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { - written: []byte{67}, + written: []byte{leafVariant.bits | 3}, // partial key length 3 }, { - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { written: []byte{12, 4, 5, 6}, @@ -153,15 +154,16 @@ func Test_Node_Encode(t *testing.T) { "branch header encoding error": { node: &Node{ Children: make([]*Node, ChildrenCapacity), - Key: make([]byte, 63+(1<<16)), + Key: make([]byte, 1), }, writes: []writeCall{ { // header - written: []byte{191}, + written: []byte{branchVariant.bits | 1}, // partial key length 1 + err: errTest, }, }, - wrappedErr: ErrPartialKeyTooBig, - errMessage: "cannot encode header: partial key length cannot be larger than or equal to 2^16: 65536", + wrappedErr: errTest, + errMessage: "cannot encode header: test error", }, "buffer write error for encoded key": { node: &Node{ @@ -171,10 +173,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, err: 
errTest, }, }, @@ -192,10 +194,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, @@ -216,10 +218,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, @@ -243,10 +245,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, @@ -275,10 +277,10 @@ func Test_Node_Encode(t *testing.T) { }, writes: []writeCall{ { // header - written: []byte{195}, + written: []byte{branchWithValueVariant.bits | 3}, // partial key length 3 }, { // key LE - written: []byte{1, 35}, + written: []byte{0x01, 0x23}, }, { // children bitmap written: []byte{136, 0}, diff --git a/internal/trie/node/header.go b/internal/trie/node/header.go index 5177b6f10c..033c5e84e7 100644 --- a/internal/trie/node/header.go +++ b/internal/trie/node/header.go @@ -4,44 +4,151 @@ package node import ( + "errors" + "fmt" "io" ) -const ( - leafHeader byte = 1 // 01 - branchHeader byte = 2 // 10 - branchWithValueHeader byte = 3 // 11 -) - -const ( - keyLenOffset = 0x3f - nodeHeaderShift = 6 -) - // encodeHeader writes the encoded header for the node. func encodeHeader(node *Node, writer io.Writer) (err error) { - var header byte + partialKeyLength := len(node.Key) + if partialKeyLength > int(maxPartialKeyLength) { + panic(fmt.Sprintf("partial key length is too big: %d", partialKeyLength)) + } + + // Merge variant byte and partial key length together + var variant variant if node.Type() == Leaf { - header = leafHeader + variant = leafVariant } else if node.Value == nil { - header = branchHeader + variant = branchVariant } else { - header = branchWithValueHeader + variant = branchWithValueVariant } - header <<= nodeHeaderShift - if len(node.Key) < keyLenOffset { - header |= byte(len(node.Key)) - _, err = writer.Write([]byte{header}) + buffer := make([]byte, 1) + buffer[0] = variant.bits + partialKeyLengthMask := ^variant.mask + + if partialKeyLength < int(partialKeyLengthMask) { + // Partial key length fits in header byte + buffer[0] |= byte(partialKeyLength) + _, err = writer.Write(buffer) return err } - header = header | keyLenOffset - _, err = writer.Write([]byte{header}) + // Partial key length does not fit in header byte only + buffer[0] |= partialKeyLengthMask + partialKeyLength -= int(partialKeyLengthMask) + _, err = writer.Write(buffer) if err != nil { return err } - err = encodeKeyLength(len(node.Key), writer) - return err + for { + buffer[0] = 255 + if partialKeyLength < 255 { + buffer[0] = byte(partialKeyLength) + } + + _, err = writer.Write(buffer) + if err != nil { + return err + } + + partialKeyLength -= int(buffer[0]) + + if buffer[0] < 255 { + break + } + } + + return nil +} + +var ( + ErrPartialKeyTooBig = errors.New("partial key length cannot be larger than 2^16") +) + +func decodeHeader(reader io.Reader) (variant byte, + partialKeyLength uint16, err error) { + buffer := make([]byte, 1) + _, err = 
reader.Read(buffer)
+    if err != nil {
+        return 0, 0, fmt.Errorf("reading header byte: %w", err)
+    }
+
+    variant, partialKeyLengthHeader, partialKeyLengthHeaderMask,
+        err := decodeHeaderByte(buffer[0])
+    if err != nil {
+        return 0, 0, fmt.Errorf("decoding header byte: %w", err)
+    }
+
+    partialKeyLength = uint16(partialKeyLengthHeader)
+    if partialKeyLengthHeader < partialKeyLengthHeaderMask {
+        // partial key length is contained in the first byte.
+        return variant, partialKeyLength, nil
+    }
+
+    // the partial key length header byte is equal to its maximum
+    // possible value; this means the partial key length is greater
+    // than this (0 to 2^6 - 1 = 63) maximum value, and we need to
+    // accumulate the next bytes from the reader to get the full
+    // partial key length.
+    // Specification: https://spec.polkadot.network/#defn-node-header
+    var previousKeyLength uint16 // used to detect a potential overflow
+    for {
+        _, err = reader.Read(buffer)
+        if err != nil {
+            return 0, 0, fmt.Errorf("reading key length: %w", err)
+        }
+
+        previousKeyLength = partialKeyLength
+        partialKeyLength += uint16(buffer[0])
+
+        if partialKeyLength < previousKeyLength {
+            // the partial key can have a length up to 65535, which is the
+            // maximum uint16 value; therefore if we overflowed, we went over
+            // this maximum.
+            overflowed := maxPartialKeyLength - previousKeyLength + partialKeyLength
+            return 0, 0, fmt.Errorf("%w: overflowed by %d", ErrPartialKeyTooBig, overflowed)
+        }
+
+        if buffer[0] < 255 {
+            // the end of the partial key length has been reached.
+            return variant, partialKeyLength, nil
+        }
+    }
+}
+
+var ErrVariantUnknown = errors.New("node variant is unknown")
+
+func decodeHeaderByte(header byte) (variantBits,
+    partialKeyLengthHeader, partialKeyLengthHeaderMask byte, err error) {
+    // variants is a slice of all variants sorted in ascending
+    // order by the number of bits each variant mask occupies
+    // in the header byte.
+    // See https://spec.polkadot.network/#defn-node-header
+    // Performance note: see `Benchmark_decodeHeaderByte`;
+    // running with a locally scoped slice is as fast as having
+    // it at global scope.
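+    // As a worked example (illustrative values only): for header byte
+    // 0b0110_1001, the leaf variant mask 0b1100_0000 extracts the variant
+    // bits 0b0100_0000 (leaf), and the inverted mask 0b0011_1111 extracts
+    // the partial key length header 0b0010_1001, that is 41.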
+ variants := []variant{ + leafVariant, // mask 1100_0000 + branchVariant, // mask 1100_0000 + branchWithValueVariant, // mask 1100_0000 + } + + for i := len(variants) - 1; i >= 0; i-- { + variantBits = header & variants[i].mask + if variantBits != variants[i].bits { + continue + } + + partialKeyLengthHeaderMask = ^variants[i].mask + partialKeyLengthHeader = header & partialKeyLengthHeaderMask + return variantBits, partialKeyLengthHeader, + partialKeyLengthHeaderMask, nil + } + + return 0, 0, 0, fmt.Errorf("%w: for header byte %08b", ErrVariantUnknown, header) } diff --git a/internal/trie/node/header_test.go b/internal/trie/node/header_test.go index 1ed826483a..8c572bbaf2 100644 --- a/internal/trie/node/header_test.go +++ b/internal/trie/node/header_test.go @@ -4,10 +4,14 @@ package node import ( + "bytes" + "io" + "math" "testing" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func Test_encodeHeader(t *testing.T) { @@ -22,7 +26,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0x80}}, + {written: []byte{branchVariant.bits}}, }, }, "branch with value": { @@ -31,7 +35,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xc0}}, + {written: []byte{branchWithValueVariant.bits}}, }, }, "branch with key of length 30": { @@ -40,7 +44,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0x9e}}, + {written: []byte{branchVariant.bits | 30}}, }, }, "branch with key of length 62": { @@ -49,7 +53,7 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xbe}}, + {written: []byte{branchVariant.bits | 62}}, }, }, "branch with key of length 63": { @@ -58,8 +62,9 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xbf}}, - {written: []byte{0x0}}, + {written: []byte{branchVariant.bits | 63}}, + {written: []byte{0x00}}, // trailing 0 to indicate the partial + // key length is done here. 
}, }, "branch with key of length 64": { @@ -68,28 +73,17 @@ func Test_encodeHeader(t *testing.T) { Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ - {written: []byte{0xbf}}, - {written: []byte{0x1}}, + {written: []byte{branchVariant.bits | 63}}, + {written: []byte{0x01}}, }, }, - "branch with key too big": { - node: &Node{ - Key: make([]byte, 65535+63), - Children: make([]*Node, ChildrenCapacity), - }, - writes: []writeCall{ - {written: []byte{0xbf}}, - }, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be larger than or equal to 2^16: 65535", - }, "branch with small key length write error": { node: &Node{ Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ { - written: []byte{0x80}, + written: []byte{branchVariant.bits}, err: errTest, }, }, @@ -98,12 +92,15 @@ func Test_encodeHeader(t *testing.T) { }, "branch with long key length write error": { node: &Node{ - Key: make([]byte, 64), + Key: make([]byte, int(^branchVariant.mask)+1), Children: make([]*Node, ChildrenCapacity), }, writes: []writeCall{ { - written: []byte{0xbf}, + written: []byte{branchVariant.bits | ^branchVariant.mask}, + }, + { + written: []byte{0x01}, err: errTest, }, }, @@ -113,7 +110,7 @@ func Test_encodeHeader(t *testing.T) { "leaf with no key": { node: &Node{}, writes: []writeCall{ - {written: []byte{0x40}}, + {written: []byte{leafVariant.bits}}, }, }, "leaf with key of length 30": { @@ -121,7 +118,7 @@ func Test_encodeHeader(t *testing.T) { Key: make([]byte, 30), }, writes: []writeCall{ - {written: []byte{0x5e}}, + {written: []byte{leafVariant.bits | 30}}, }, }, "leaf with short key write error": { @@ -130,19 +127,19 @@ func Test_encodeHeader(t *testing.T) { }, writes: []writeCall{ { - written: []byte{0x5e}, + written: []byte{leafVariant.bits | 30}, err: errTest, }, }, errWrapped: errTest, - errMessage: errTest.Error(), + errMessage: "test error", }, "leaf with key of length 62": { node: &Node{ Key: make([]byte, 62), }, writes: []writeCall{ - {written: []byte{0x7e}}, + {written: []byte{leafVariant.bits | 62}}, }, }, "leaf with key of length 63": { @@ -150,7 +147,7 @@ func Test_encodeHeader(t *testing.T) { Key: make([]byte, 63), }, writes: []writeCall{ - {written: []byte{0x7f}}, + {written: []byte{leafVariant.bits | 63}}, {written: []byte{0x0}}, }, }, @@ -159,7 +156,7 @@ func Test_encodeHeader(t *testing.T) { Key: make([]byte, 64), }, writes: []writeCall{ - {written: []byte{0x7f}}, + {written: []byte{leafVariant.bits | 63}}, {written: []byte{0x1}}, }, }, @@ -169,22 +166,32 @@ func Test_encodeHeader(t *testing.T) { }, writes: []writeCall{ { - written: []byte{0x7f}, + written: []byte{leafVariant.bits | 63}, err: errTest, }, }, errWrapped: errTest, - errMessage: errTest.Error(), + errMessage: "test error", }, - "leaf with key too big": { + "leaf with key length over 3 bytes": { node: &Node{ - Key: make([]byte, 65535+63), + Key: make([]byte, int(^leafVariant.mask)+0b1111_1111+0b0000_0001), }, writes: []writeCall{ - {written: []byte{0x7f}}, + {written: []byte{leafVariant.bits | ^leafVariant.mask}}, + {written: []byte{0b1111_1111}}, + {written: []byte{0b0000_0001}}, + }, + }, + "leaf with key length over 3 bytes and last byte zero": { + node: &Node{ + Key: make([]byte, int(^leafVariant.mask)+0b1111_1111), + }, + writes: []writeCall{ + {written: []byte{leafVariant.bits | ^leafVariant.mask}}, + {written: []byte{0b1111_1111}}, + {written: []byte{0x00}}, }, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be larger than or equal to 2^16: 
65535", }, } @@ -215,4 +222,211 @@ func Test_encodeHeader(t *testing.T) { } }) } + + t.Run("partial key length is too big", func(t *testing.T) { + t.Parallel() + + const keyLength = uint(maxPartialKeyLength) + 1 + node := &Node{ + Key: make([]byte, keyLength), + } + + assert.PanicsWithValue(t, "partial key length is too big: 65536", func() { + _ = encodeHeader(node, io.Discard) + }) + }) +} + +func Test_encodeHeader_At_Maximum(t *testing.T) { + t.Parallel() + + // Note: this test case cannot run with the + // mock writer since it's too slow, so we use + // an actual buffer. + + variant := leafVariant.bits + const partialKeyLengthHeaderMask = 0b0011_1111 + const keyLength = uint(maxPartialKeyLength) + extraKeyBytesNeeded := math.Ceil(float64(maxPartialKeyLength-partialKeyLengthHeaderMask) / 255.0) + expectedEncodingLength := 1 + int(extraKeyBytesNeeded) + + lengthLeft := maxPartialKeyLength + expectedBytes := make([]byte, expectedEncodingLength) + expectedBytes[0] = variant | partialKeyLengthHeaderMask + lengthLeft -= partialKeyLengthHeaderMask + for i := 1; i < len(expectedBytes)-1; i++ { + expectedBytes[i] = 255 + lengthLeft -= 255 + } + expectedBytes[len(expectedBytes)-1] = byte(lengthLeft) + + buffer := bytes.NewBuffer(nil) + buffer.Grow(expectedEncodingLength) + + node := &Node{ + Key: make([]byte, keyLength), + } + + err := encodeHeader(node, buffer) + + require.NoError(t, err) + assert.Equal(t, expectedBytes, buffer.Bytes()) +} + +func Test_decodeHeader(t *testing.T) { + testCases := map[string]struct { + reads []readCall + variant byte + partialKeyLength uint16 + errWrapped error + errMessage string + }{ + "first byte read error": { + reads: []readCall{ + {buffArgCap: 1, err: errTest}, + }, + errWrapped: errTest, + errMessage: "reading header byte: test error", + }, + "header byte decoding error": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{0b0011_1110}}, + }, + errWrapped: ErrVariantUnknown, + errMessage: "decoding header byte: node variant is unknown: for header byte 00111110", + }, + "partial key length contained in first byte": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{leafVariant.bits | 0b0011_1110}}, + }, + variant: leafVariant.bits, + partialKeyLength: uint16(0b0011_1110), + }, + "long partial key length and second byte read error": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{leafVariant.bits | 0b0011_1111}}, + {buffArgCap: 1, err: errTest}, + }, + errWrapped: errTest, + errMessage: "reading key length: test error", + }, + "partial key length spread on multiple bytes": { + reads: []readCall{ + {buffArgCap: 1, read: []byte{leafVariant.bits | 0b0011_1111}}, + {buffArgCap: 1, read: []byte{0b1111_1111}}, + {buffArgCap: 1, read: []byte{0b1111_0000}}, + }, + variant: leafVariant.bits, + partialKeyLength: uint16(0b0011_1111 + 0b1111_1111 + 0b1111_0000), + }, + "partial key length too long": { + reads: repeatReadCall(readCall{ + buffArgCap: 1, + read: []byte{0b1111_1111}, + }, 258), + errWrapped: ErrPartialKeyTooBig, + errMessage: "partial key length cannot be larger than 2^16: overflowed by 254", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + + reader := NewMockReader(ctrl) + var previousCall *gomock.Call + for _, readCall := range testCase.reads { + readCall := readCall // required variable pinning + byteSliceCapMatcher := newByteSliceCapMatcher(readCall.buffArgCap) + call := reader.EXPECT().Read(byteSliceCapMatcher). 
+ DoAndReturn(func(b []byte) (n int, err error) { + copy(b, readCall.read) + return readCall.n, readCall.err + }) + if previousCall != nil { + call.After(previousCall) + } + previousCall = call + } + + variant, partialKeyLength, err := decodeHeader(reader) + + assert.Equal(t, testCase.variant, variant) + assert.Equal(t, int(testCase.partialKeyLength), int(partialKeyLength)) + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + }) + } +} + +func Test_decodeHeaderByte(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + header byte + variantBits byte + partialKeyLengthHeader byte + partialKeyLengthHeaderMask byte + errWrapped error + errMessage string + }{ + "branch with value header": { + header: 0b1110_1001, + variantBits: 0b1100_0000, + partialKeyLengthHeader: 0b0010_1001, + partialKeyLengthHeaderMask: 0b0011_1111, + }, + "branch header": { + header: 0b1010_1001, + variantBits: 0b1000_0000, + partialKeyLengthHeader: 0b0010_1001, + partialKeyLengthHeaderMask: 0b0011_1111, + }, + "leaf header": { + header: 0b0110_1001, + variantBits: 0b0100_0000, + partialKeyLengthHeader: 0b0010_1001, + partialKeyLengthHeaderMask: 0b0011_1111, + }, + "unknown variant header": { + header: 0b0000_0000, + errWrapped: ErrVariantUnknown, + errMessage: "node variant is unknown: for header byte 00000000", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + variantBits, partialKeyLengthHeader, + partialKeyLengthHeaderMask, err := decodeHeaderByte(testCase.header) + + assert.Equal(t, testCase.variantBits, variantBits) + assert.Equal(t, testCase.partialKeyLengthHeader, partialKeyLengthHeader) + assert.Equal(t, testCase.partialKeyLengthHeaderMask, partialKeyLengthHeaderMask) + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + }) + } +} + +func Benchmark_decodeHeaderByte(b *testing.B) { + // With global scoped variants slice: + // 3.453 ns/op 0 B/op 0 allocs/op + // With locally scoped variants slice: + // 3.441 ns/op 0 B/op 0 allocs/op + header := leafVariant.bits | 0b0000_0001 + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _, _ = decodeHeaderByte(header) + } } diff --git a/internal/trie/node/key.go b/internal/trie/node/key.go index 3b450513bb..343a5d747d 100644 --- a/internal/trie/node/key.go +++ b/internal/trie/node/key.go @@ -4,92 +4,31 @@ package node import ( - "bytes" "errors" "fmt" "io" "github.com/ChainSafe/gossamer/internal/trie/codec" - "github.com/ChainSafe/gossamer/internal/trie/pools" ) -const maxPartialKeySize = ^uint16(0) +const maxPartialKeyLength = ^uint16(0) -var ( - ErrPartialKeyTooBig = errors.New("partial key length cannot be larger than or equal to 2^16") - ErrReadKeyLength = errors.New("cannot read key length") - ErrReadKeyData = errors.New("cannot read key data") -) - -// encodeKeyLength encodes the key length. 
-func encodeKeyLength(keyLength int, writer io.Writer) (err error) { - keyLength -= 63 - - if keyLength >= int(maxPartialKeySize) { - return fmt.Errorf("%w: %d", - ErrPartialKeyTooBig, keyLength) - } - - for i := uint16(0); i < maxPartialKeySize; i++ { - if keyLength < 255 { - _, err = writer.Write([]byte{byte(keyLength)}) - if err != nil { - return err - } - break - } - _, err = writer.Write([]byte{255}) - if err != nil { - return err - } - - keyLength -= 255 - } - - return nil -} +var ErrReaderMismatchCount = errors.New("read unexpected number of bytes from reader") // decodeKey decodes a key from a reader. -func decodeKey(reader io.Reader, keyLengthByte byte) (b []byte, err error) { - keyLength := int(keyLengthByte) - - if keyLengthByte == keyLenOffset { - // partial key longer than 63, read next bytes for rest of pk len - buffer := pools.SingleByteBuffers.Get().(*bytes.Buffer) - defer pools.SingleByteBuffers.Put(buffer) - oneByteBuf := buffer.Bytes() - for { - _, err = reader.Read(oneByteBuf) - if err != nil { - return nil, fmt.Errorf("%w: %s", ErrReadKeyLength, err) - } - nextKeyLen := oneByteBuf[0] - - keyLength += int(nextKeyLen) - - if nextKeyLen < 0xff { - break - } - - if keyLength >= int(maxPartialKeySize) { - return nil, fmt.Errorf("%w: %d", - ErrPartialKeyTooBig, keyLength) - } - } - } - - if keyLength == 0 { +func decodeKey(reader io.Reader, partialKeyLength uint16) (b []byte, err error) { + if partialKeyLength == 0 { return []byte{}, nil } - key := make([]byte, keyLength/2+keyLength%2) + key := make([]byte, partialKeyLength/2+partialKeyLength%2) n, err := reader.Read(key) if err != nil { - return nil, fmt.Errorf("%w: %s", ErrReadKeyData, err) + return nil, fmt.Errorf("reading from reader: %w", err) } else if n != len(key) { - return nil, fmt.Errorf("%w: read %d bytes instead of %d", - ErrReadKeyData, n, len(key)) + return nil, fmt.Errorf("%w: read %d bytes instead of expected %d bytes", + ErrReaderMismatchCount, n, len(key)) } - return codec.KeyLEToNibbles(key)[keyLength%2:], nil + return codec.KeyLEToNibbles(key)[partialKeyLength%2:], nil } diff --git a/internal/trie/node/key_test.go b/internal/trie/node/key_test.go index 2e21825cce..930a97c656 100644 --- a/internal/trie/node/key_test.go +++ b/internal/trie/node/key_test.go @@ -4,13 +4,11 @@ package node import ( - "bytes" "fmt" "testing" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func repeatBytes(n int, b byte) (slice []byte) { @@ -21,129 +19,6 @@ func repeatBytes(n int, b byte) (slice []byte) { return slice } -func Test_encodeKeyLength(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - keyLength int - writes []writeCall - errWrapped error - errMessage string - }{ - "length equal to maximum": { - keyLength: int(maxPartialKeySize) + 63, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be " + - "larger than or equal to 2^16: 65535", - }, - "zero length": { - writes: []writeCall{ - { - written: []byte{0xc1}, - }, - }, - }, - "one length": { - keyLength: 1, - writes: []writeCall{ - { - written: []byte{0xc2}, - }, - }, - }, - "error at single byte write": { - keyLength: 1, - writes: []writeCall{ - { - written: []byte{0xc2}, - err: errTest, - }, - }, - errWrapped: errTest, - errMessage: errTest.Error(), - }, - "error at first byte write": { - keyLength: 255 + 100 + 63, - writes: []writeCall{ - { - written: []byte{255}, - err: errTest, - }, - }, - errWrapped: errTest, - errMessage: errTest.Error(), - }, - "error at 
last byte write": { - keyLength: 255 + 100 + 63, - writes: []writeCall{ - { - written: []byte{255}, - }, - { - written: []byte{100}, - err: errTest, - }, - }, - errWrapped: errTest, - errMessage: errTest.Error(), - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - writer := NewMockWriter(ctrl) - var previousCall *gomock.Call - for _, write := range testCase.writes { - call := writer.EXPECT(). - Write(write.written). - Return(write.n, write.err) - - if write.err != nil { - break - } else if previousCall != nil { - call.After(previousCall) - } - previousCall = call - } - - err := encodeKeyLength(testCase.keyLength, writer) - - assert.ErrorIs(t, err, testCase.errWrapped) - if testCase.errWrapped != nil { - assert.EqualError(t, err, testCase.errMessage) - } - }) - } - - t.Run("length at maximum", func(t *testing.T) { - t.Parallel() - - // Note: this test case cannot run with the - // mock writer since it's too slow, so we use - // an actual buffer. - - const keyLength = int(maxPartialKeySize) + 62 - const expectedEncodingLength = 257 - expectedBytes := make([]byte, expectedEncodingLength) - for i := 0; i < len(expectedBytes)-1; i++ { - expectedBytes[i] = 255 - } - expectedBytes[len(expectedBytes)-1] = 254 - - buffer := bytes.NewBuffer(nil) - buffer.Grow(expectedEncodingLength) - - err := encodeKeyLength(keyLength, buffer) - - require.NoError(t, err) - assert.Equal(t, expectedBytes, buffer.Bytes()) - }) -} - //go:generate mockgen -destination=reader_mock_test.go -package $GOPACKAGE io Reader type readCall struct { @@ -153,20 +28,12 @@ type readCall struct { err error } -func repeatReadCalls(rc readCall, length int) (readCalls []readCall) { - readCalls = make([]readCall, length) - for i := range readCalls { - readCalls[i] = readCall{ - buffArgCap: rc.buffArgCap, - n: rc.n, - err: rc.err, - } - if rc.read != nil { - readCalls[i].read = make([]byte, len(rc.read)) - copy(readCalls[i].read, rc.read) - } +func repeatReadCall(base readCall, n int) (calls []readCall) { + calls = make([]readCall, n) + for i := range calls { + calls[i] = base } - return readCalls + return calls } var _ gomock.Matcher = (*byteSliceCapMatcher)(nil) @@ -184,7 +51,7 @@ func (b *byteSliceCapMatcher) Matches(x interface{}) bool { } func (b *byteSliceCapMatcher) String() string { - return fmt.Sprintf("capacity of slice is not the expected capacity %d", b.capacity) + return fmt.Sprintf("slice with capacity %d", b.capacity) } func newByteSliceCapMatcher(capacity int) *byteSliceCapMatcher { @@ -197,45 +64,45 @@ func Test_decodeKey(t *testing.T) { t.Parallel() testCases := map[string]struct { - reads []readCall - keyLength byte - b []byte - errWrapped error - errMessage string + reads []readCall + partialKeyLength uint16 + b []byte + errWrapped error + errMessage string }{ "zero key length": { - b: []byte{}, + partialKeyLength: 0, + b: []byte{}, }, "short key length": { reads: []readCall{ {buffArgCap: 3, read: []byte{1, 2, 3}, n: 3}, }, - keyLength: 5, - b: []byte{0x1, 0x0, 0x2, 0x0, 0x3}, + partialKeyLength: 5, + b: []byte{0x1, 0x0, 0x2, 0x0, 0x3}, }, "key read error": { reads: []readCall{ {buffArgCap: 3, err: errTest}, }, - keyLength: 5, - errWrapped: ErrReadKeyData, - errMessage: "cannot read key data: test error", + partialKeyLength: 5, + errWrapped: errTest, + errMessage: "reading from reader: test error", }, "key read bytes count mismatch": { reads: []readCall{ {buffArgCap: 3, n: 2}, }, - keyLength: 5, - errWrapped: 
ErrReadKeyData, - errMessage: "cannot read key data: read 2 bytes instead of 3", + partialKeyLength: 5, + errWrapped: ErrReaderMismatchCount, + errMessage: "read unexpected number of bytes from reader: read 2 bytes instead of expected 3 bytes", }, "long key length": { reads: []readCall{ - {buffArgCap: 1, read: []byte{6}, n: 1}, // key length {buffArgCap: 35, read: repeatBytes(35, 7), n: 35}, // key data }, - keyLength: 0x3f, + partialKeyLength: 70, b: []byte{ 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, @@ -245,20 +112,6 @@ func Test_decodeKey(t *testing.T) { 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7, 0x0, 0x7}, }, - "key length read error": { - reads: []readCall{ - {buffArgCap: 1, err: errTest}, - }, - keyLength: 0x3f, - errWrapped: ErrReadKeyLength, - errMessage: "cannot read key length: test error", - }, - "key length too big": { - reads: repeatReadCalls(readCall{buffArgCap: 1, read: []byte{0xff}, n: 1}, 257), - keyLength: 0x3f, - errWrapped: ErrPartialKeyTooBig, - errMessage: "partial key length cannot be larger than or equal to 2^16: 65598", - }, } for name, testCase := range testCases { @@ -270,6 +123,7 @@ func Test_decodeKey(t *testing.T) { reader := NewMockReader(ctrl) var previousCall *gomock.Call for _, readCall := range testCase.reads { + readCall := readCall // required variable pinning byteSliceCapMatcher := newByteSliceCapMatcher(readCall.buffArgCap) call := reader.EXPECT().Read(byteSliceCapMatcher). DoAndReturn(func(b []byte) (n int, err error) { @@ -282,7 +136,7 @@ func Test_decodeKey(t *testing.T) { previousCall = call } - b, err := decodeKey(reader, testCase.keyLength) + b, err := decodeKey(reader, testCase.partialKeyLength) assert.ErrorIs(t, err, testCase.errWrapped) if err != nil { diff --git a/internal/trie/node/node.go b/internal/trie/node/node.go index 493ca1de91..a40cf31fd7 100644 --- a/internal/trie/node/node.go +++ b/internal/trie/node/node.go @@ -1,6 +1,8 @@ // Copyright 2021 ChainSafe Systems (ON) // SPDX-License-Identifier: LGPL-3.0-only +// Package node defines the `Node` structure with methods +// to be used in the modified Merkle-Patricia Radix-16 trie. package node import ( diff --git a/internal/trie/node/variants.go b/internal/trie/node/variants.go new file mode 100644 index 0000000000..2c75c44904 --- /dev/null +++ b/internal/trie/node/variants.go @@ -0,0 +1,26 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package node + +type variant struct { + bits byte + mask byte +} + +// Node variants +// See https://spec.polkadot.network/#defn-node-header +var ( + leafVariant = variant{ // leaf 01 + bits: 0b0100_0000, + mask: 0b1100_0000, + } + branchVariant = variant{ // branch 10 + bits: 0b1000_0000, + mask: 0b1100_0000, + } + branchWithValueVariant = variant{ // branch 11 + bits: 0b1100_0000, + mask: 0b1100_0000, + } +) diff --git a/internal/trie/pools/pools.go b/internal/trie/pools/pools.go index 855232ef44..1bfe8f5a83 100644 --- a/internal/trie/pools/pools.go +++ b/internal/trie/pools/pools.go @@ -10,15 +10,6 @@ import ( "golang.org/x/crypto/blake2b" ) -// SingleByteBuffers is a sync pool of buffers of capacity 1. -var SingleByteBuffers = &sync.Pool{ - New: func() interface{} { - const bufferLength = 1 - b := make([]byte, bufferLength) - return bytes.NewBuffer(b) - }, -} - // DigestBuffers is a sync pool of buffers of capacity 32. 
var DigestBuffers = &sync.Pool{ New: func() interface{} { From e445bafd15744013cceefa80b21e6efacca26414 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jul 2022 14:03:22 -0400 Subject: [PATCH 27/48] chore(deps): bump github.com/klauspost/compress from 1.15.6 to 1.15.7 (#2637) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c3d0c66a56..8ba432d0f6 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/ipfs/go-ds-badger2 v0.1.1 github.com/ipfs/go-ipns v0.1.2 //indirect github.com/jpillora/ipfilter v1.2.6 - github.com/klauspost/compress v1.15.6 + github.com/klauspost/compress v1.15.7 github.com/libp2p/go-libp2p v0.15.1 github.com/libp2p/go-libp2p-core v0.9.0 github.com/libp2p/go-libp2p-discovery v0.5.1 diff --git a/go.sum b/go.sum index a975634576..4e1907578e 100644 --- a/go.sum +++ b/go.sum @@ -605,8 +605,8 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= -github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok= +github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= From e88486a2189aca0976b56e3cb32ba64093a8a784 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jul 2022 14:03:46 -0400 Subject: [PATCH 28/48] chore(deps): bump github.com/stretchr/testify from 1.7.2 to 1.8.0 (#2638) --- go.mod | 4 ++-- go.sum | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 8ba432d0f6..65ad2a6b72 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 github.com/qdm12/gotree v0.2.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.0 github.com/urfave/cli v1.22.9 github.com/wasmerio/go-ext-wasm v0.3.2-0.20200326095750-0a32be6068ec golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 @@ -163,7 +163,7 @@ require ( github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect - github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/objx v0.4.0 // indirect github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce // indirect diff --git a/go.sum b/go.sum index 4e1907578e..2d325e85bf 100644 --- a/go.sum +++ b/go.sum @@ -1222,8 +1222,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3 
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1232,8 +1233,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= From 90529f6c0d9b58dc960c7704849413b4dbf30cdd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jul 2022 14:07:08 -0400 Subject: [PATCH 29/48] chore(deps): bump github.com/ethereum/go-ethereum (#2639) --- go.mod | 13 ++++++------- go.sum | 23 +++++++++++++---------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 65ad2a6b72..548e37306b 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de github.com/disiqueira/gotree v1.0.0 github.com/docker/docker v20.10.17+incompatible - github.com/ethereum/go-ethereum v1.10.19 + github.com/ethereum/go-ethereum v1.10.20 github.com/fatih/color v1.13.0 github.com/go-playground/validator/v10 v10.11.0 github.com/golang/mock v1.6.0 @@ -44,7 +44,7 @@ require ( github.com/urfave/cli v1.22.9 github.com/wasmerio/go-ext-wasm v0.3.2-0.20200326095750-0a32be6068ec golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/text v0.3.7 google.golang.org/protobuf v1.28.0 ) @@ -57,7 +57,7 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto 
v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set v1.8.0 // indirect @@ -159,9 +159,8 @@ require ( github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rs/cors v1.7.0 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/stretchr/objx v0.4.0 // indirect github.com/tklauser/go-sysconf v0.3.5 // indirect @@ -176,9 +175,9 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect + golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect google.golang.org/appengine v1.6.6 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 2d325e85bf..b424162b7a 100644 --- a/go.sum +++ b/go.sum @@ -192,8 +192,8 @@ github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -262,8 +262,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg= -github.com/ethereum/go-ethereum v1.10.19 h1:EOR5JbL4MD5yeOqv8W2iC1s4NximrTjqFccUz8lyBRA= -github.com/ethereum/go-ethereum v1.10.19/go.mod h1:IJBNMtzKcNHPtllYihy6BL2IgK1u+32JriaTbdt4v+w= +github.com/ethereum/go-ethereum v1.10.20 h1:75IW830ClSS40yrQC1ZCMZCt5I+zU16oqId2SiQwdQ4= +github.com/ethereum/go-ethereum v1.10.20/go.mod h1:LWUN82TCHGpxB3En5HVmLLzPD7YSrEUFmFfN1nKkVN0= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= @@ -1153,8 +1153,9 @@ github.com/rs/cors v1.7.0 
h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -1184,7 +1185,6 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= @@ -1238,7 +1238,7 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= @@ -1457,8 +1457,9 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1564,11 +1565,13 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 548d75a9360b7facb730949b6e42a9030d94d6d8 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Mon, 4 Jul 2022 16:26:57 -0400 Subject: [PATCH 30/48] chore(trie): update tests to always have leaves with values (#2588) - Leaf without value cannot exist per the specification - Remove test cases for empty leaves - Change test cases to always have non empty leaves - Remove leaf type check in node encoding --- dot/state/tries_test.go | 6 +- internal/trie/node/branch_encode_test.go | 67 ++-- internal/trie/node/copy_test.go | 23 +- internal/trie/node/encode.go | 3 +- internal/trie/node/encode_test.go | 25 +- internal/trie/node/hash_test.go | 21 +- internal/trie/node/header_test.go | 2 +- internal/trie/node/node_test.go | 10 - lib/trie/database.go | 2 - lib/trie/trie_test.go | 455 +++++++++++++---------- 10 files changed, 326 insertions(+), 288 deletions(-) diff --git a/dot/state/tries_test.go b/dot/state/tries_test.go index 388a689106..50aa601633 100644 --- a/dot/state/tries_test.go +++ b/dot/state/tries_test.go @@ -169,13 +169,15 @@ func Test_Tries_get(t *testing.T) { tries: &Tries{ rootToTrie: map[common.Hash]*trie.Trie{ {1, 2, 3}: trie.NewTrie(&node.Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }), }, }, root: common.Hash{1, 2, 3}, trie: trie.NewTrie(&node.Node{ - Key: []byte{1, 2, 3}, + Key: 
[]byte{1, 2, 3}, + Value: []byte{1}, }), }, "not found in map": { diff --git a/internal/trie/node/branch_encode_test.go b/internal/trie/node/branch_encode_test.go index 88efb2e752..c0028cc897 100644 --- a/internal/trie/node/branch_encode_test.go +++ b/internal/trie/node/branch_encode_test.go @@ -218,11 +218,11 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { "no children": {}, "first child not nil": { children: []*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, @@ -231,25 +231,25 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, "first two children not nil": { children: []*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{2}}, + {Key: []byte{3}, Value: []byte{4}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, { - written: []byte{12, 65, 2, 0}, + written: []byte{16, 65, 3, 4, 4}, }, }, }, @@ -258,12 +258,12 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, nil, nil, nil, nil, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, err: errTest, }, }, @@ -278,13 +278,13 @@ func Test_encodeChildrenOpportunisticParallel(t *testing.T) { { Key: []byte{1}, Children: []*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, }, }, writes: []writeCall{ { - written: []byte{32, 129, 1, 1, 0, 12, 65, 1, 0}, + written: []byte{36, 129, 1, 1, 0, 16, 65, 1, 4, 2}, }, }, }, @@ -360,11 +360,11 @@ func Test_encodeChildrenSequentially(t *testing.T) { "no children": {}, "first child not nil": { children: []*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, @@ -373,25 +373,25 @@ func Test_encodeChildrenSequentially(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, }, }, "first two children not nil": { children: []*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{2}}, + {Key: []byte{3}, Value: []byte{4}}, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, }, { - written: []byte{12, 65, 2, 0}, + written: []byte{16, 65, 3, 4, 4}, }, }, }, @@ -400,12 +400,12 @@ func Test_encodeChildrenSequentially(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{2}}, nil, nil, nil, nil, }, writes: []writeCall{ { - written: []byte{12, 65, 1, 0}, + written: []byte{16, 65, 1, 4, 2}, err: errTest, }, }, @@ -457,13 +457,6 @@ func Test_encodeChild(t *testing.T) { errMessage string }{ "nil node": {}, - "empty leaf child": { - child: &Node{}, - writeCall: true, - write: writeCall{ - written: []byte{8, 64, 0}, - }, - }, "empty branch child": { child: &Node{ Children: make([]*Node, ChildrenCapacity), @@ -547,25 +540,15 @@ func Test_scaleEncodeHash(t 
*testing.T) { wrappedErr error errMessage string }{ - "empty leaf": { - node: &Node{}, - encoding: []byte{0x8, 0x40, 0}, - }, - "empty branch": { - node: &Node{ - Children: make([]*Node, ChildrenCapacity), - }, - encoding: []byte{0xc, 0x80, 0x0, 0x0}, - }, - "non empty branch": { + "branch": { node: &Node{ Key: []byte{1, 2}, Value: []byte{3, 4}, Children: []*Node{ - nil, nil, {Key: []byte{9}}, + nil, nil, {Key: []byte{9}, Value: []byte{1}}, }, }, - encoding: []byte{0x2c, 0xc2, 0x12, 0x4, 0x0, 0x8, 0x3, 0x4, 0xc, 0x41, 0x9, 0x0}, + encoding: []byte{0x30, 0xc2, 0x12, 0x4, 0x0, 0x8, 0x3, 0x4, 0x10, 0x41, 0x9, 0x4, 0x1}, }, } diff --git a/internal/trie/node/copy_test.go b/internal/trie/node/copy_test.go index ad0f2f0a2a..8816ee4604 100644 --- a/internal/trie/node/copy_test.go +++ b/internal/trie/node/copy_test.go @@ -42,7 +42,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -55,7 +56,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -65,7 +67,8 @@ func Test_Node_Copy(t *testing.T) { node: &Node{ Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), }, @@ -75,7 +78,8 @@ func Test_Node_Copy(t *testing.T) { expectedNode: &Node{ Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), }, @@ -86,7 +90,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -99,7 +104,8 @@ func Test_Node_Copy(t *testing.T) { Value: []byte{3, 4}, Children: padRightChildren([]*Node{ nil, nil, { - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), Dirty: true, @@ -107,11 +113,6 @@ func Test_Node_Copy(t *testing.T) { Encoding: []byte{6}, }, }, - "empty leaf": { - node: &Node{}, - settings: DefaultCopySettings, - expectedNode: &Node{}, - }, "non empty leaf": { node: &Node{ Key: []byte{1, 2}, diff --git a/internal/trie/node/encode.go b/internal/trie/node/encode.go index 5bea739c0c..c605da7302 100644 --- a/internal/trie/node/encode.go +++ b/internal/trie/node/encode.go @@ -45,8 +45,7 @@ func (n *Node) Encode(buffer Buffer) (err error) { // check value is not nil for branch nodes, even though // leaf nodes always have a non-nil value. 
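	//
	// A minimal sketch of the invariant this change relies on, assuming the
	// Node type of this package: a leaf is only ever constructed with a
	// value, for example
	//
	//	leaf := &Node{Key: []byte{1}, Value: []byte{2}}
	//
	// so an empty-value leaf cannot exist per the specification, and
	// checking n.Value alone is sufficient for both leaves and branches.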
- if n.Type() == Leaf || n.Value != nil { - // TODO remove `n.Type() == Leaf` and update tests + if n.Value != nil { encodedValue, err := scale.Marshal(n.Value) // TODO scale encoder to write to buffer if err != nil { return fmt.Errorf("cannot scale encode value: %w", err) diff --git a/internal/trie/node/encode_test.go b/internal/trie/node/encode_test.go index ea6a4fb47e..2fb208cf00 100644 --- a/internal/trie/node/encode_test.go +++ b/internal/trie/node/encode_test.go @@ -72,7 +72,8 @@ func Test_Node_Encode(t *testing.T) { }, "leaf buffer write error for encoded key": { node: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, writes: []writeCall{ { @@ -188,8 +189,8 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ @@ -212,8 +213,8 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ @@ -239,8 +240,8 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ @@ -257,7 +258,7 @@ func Test_Node_Encode(t *testing.T) { written: []byte{4, 100}, }, { // children - written: []byte{12, 65, 9, 0}, + written: []byte{16, 65, 9, 4, 1}, err: errTest, }, }, @@ -271,8 +272,8 @@ func Test_Node_Encode(t *testing.T) { Key: []byte{1, 2, 3}, Value: []byte{100}, Children: []*Node{ - nil, nil, nil, {Key: []byte{9}}, - nil, nil, nil, {Key: []byte{11}}, + nil, nil, nil, {Key: []byte{9}, Value: []byte{1}}, + nil, nil, nil, {Key: []byte{11}, Value: []byte{1}}, }, }, writes: []writeCall{ @@ -289,10 +290,10 @@ func Test_Node_Encode(t *testing.T) { written: []byte{4, 100}, }, { // first children - written: []byte{12, 65, 9, 0}, + written: []byte{16, 65, 9, 4, 1}, }, { // second children - written: []byte{12, 65, 11, 0}, + written: []byte{16, 65, 11, 4, 1}, }, }, }, diff --git a/internal/trie/node/hash_test.go b/internal/trie/node/hash_test.go index b2d785342b..703845d514 100644 --- a/internal/trie/node/hash_test.go +++ b/internal/trie/node/hash_test.go @@ -21,16 +21,6 @@ func Test_Node_EncodeAndHash(t *testing.T) { errWrapped error errMessage string }{ - "empty leaf": { - node: Node{}, - expectedNode: Node{ - Encoding: []byte{0x40, 0x0}, - HashDigest: []byte{0x40, 0x0}, - }, - encoding: []byte{0x40, 0x0}, - hash: []byte{0x40, 0x0}, - isRoot: false, - }, "small leaf encoding": { node: Node{ Key: []byte{1}, @@ -93,14 +83,15 @@ func Test_Node_EncodeAndHash(t *testing.T) { }, "large leaf encoding": { node: Node{ - Key: repeatBytes(65, 7), + Key: repeatBytes(65, 7), + Value: []byte{0x01}, }, expectedNode: Node{ - Encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x0}, //nolint:lll - HashDigest: []byte{0xfb, 0xae, 0x31, 0x4b, 0xef, 0x31, 0x9, 0xc7, 0x62, 0x99, 0x9d, 0x40, 0x9b, 
0xd4, 0xdc, 0x64, 0xe7, 0x39, 0x46, 0x8b, 0xd3, 0xaf, 0xe8, 0x63, 0x9d, 0xf9, 0x41, 0x40, 0x76, 0x40, 0x10, 0xa3}, //nolint:lll + Encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x4, 0x1}, //nolint:lll + HashDigest: []byte{0xd2, 0x1d, 0x43, 0x7, 0x18, 0x17, 0x1b, 0xf1, 0x45, 0x9c, 0xe5, 0x8f, 0xd7, 0x79, 0x82, 0xb, 0xc8, 0x5c, 0x8, 0x47, 0xfe, 0x6c, 0x99, 0xc5, 0xe9, 0x57, 0x87, 0x7, 0x1d, 0x2e, 0x24, 0x5d}, //nolint:lll }, - encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x0}, //nolint:lll - hash: []byte{0xfb, 0xae, 0x31, 0x4b, 0xef, 0x31, 0x9, 0xc7, 0x62, 0x99, 0x9d, 0x40, 0x9b, 0xd4, 0xdc, 0x64, 0xe7, 0x39, 0x46, 0x8b, 0xd3, 0xaf, 0xe8, 0x63, 0x9d, 0xf9, 0x41, 0x40, 0x76, 0x40, 0x10, 0xa3}, //nolint:lll + encoding: []byte{0x7f, 0x2, 0x7, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x4, 0x1}, //nolint:lll + hash: []byte{0xd2, 0x1d, 0x43, 0x7, 0x18, 0x17, 0x1b, 0xf1, 0x45, 0x9c, 0xe5, 0x8f, 0xd7, 0x79, 0x82, 0xb, 0xc8, 0x5c, 0x8, 0x47, 0xfe, 0x6c, 0x99, 0xc5, 0xe9, 0x57, 0x87, 0x7, 0x1d, 0x2e, 0x24, 0x5d}, //nolint:lll isRoot: false, }, "empty branch": { diff --git a/internal/trie/node/header_test.go b/internal/trie/node/header_test.go index 8c572bbaf2..bc77715d1c 100644 --- a/internal/trie/node/header_test.go +++ b/internal/trie/node/header_test.go @@ -108,7 +108,7 @@ func Test_encodeHeader(t *testing.T) { errMessage: "test error", }, "leaf with no key": { - node: &Node{}, + node: &Node{Value: []byte{1}}, writes: []writeCall{ {written: []byte{leafVariant.bits}}, }, diff --git a/internal/trie/node/node_test.go b/internal/trie/node/node_test.go index a16f3c8912..5fb99c189b 100644 --- a/internal/trie/node/node_test.go +++ b/internal/trie/node/node_test.go @@ -16,16 +16,6 @@ func Test_Node_String(t *testing.T) { node *Node s string }{ - "empty leaf": { - node: &Node{}, - s: `Leaf -├── Generation: 0 -├── Dirty: false -├── Key: nil -├── Value: nil -├── Calculated encoding: nil -└── Calculated digest: nil`, - }, "leaf with value smaller than 1024": { node: &Node{ Key: []byte{1, 2}, diff --git a/lib/trie/database.go b/lib/trie/database.go index 6c300a8b04..4c608b9d1c 100644 --- a/lib/trie/database.go +++ b/lib/trie/database.go @@ -448,8 +448,6 @@ func (t *Trie) GetInsertedNodeHashes() (hashesSet map[common.Hash]struct{}, err } func (t *Trie) getInsertedNodeHashes(n *Node, hashes map[common.Hash]struct{}) (err error) { - // TODO pass map of hashes or slice as argument to avoid copying - // and using more memory. 
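	// The hashes map is allocated once by the exported GetInsertedNodeHashes
	// wrapper and shared by every recursive call, so node hashes accumulate
	// in place with no copying; the TODO deleted above was already satisfied
	// by this signature. A minimal usage sketch, assuming this file's types:
	//
	//	hashes := make(map[common.Hash]struct{})
	//	err := t.getInsertedNodeHashes(t.root, hashes)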
if n == nil || !n.Dirty { return nil } diff --git a/lib/trie/trie_test.go b/lib/trie/trie_test.go index 6069af82f1..b27b68cdd3 100644 --- a/lib/trie/trie_test.go +++ b/lib/trie/trie_test.go @@ -47,18 +47,18 @@ func Test_Trie_Snapshot(t *testing.T) { trie := &Trie{ generation: 8, - root: &Node{Key: []byte{8}}, + root: &Node{Key: []byte{8}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1}: { generation: 1, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {1}: {}, }, }, {2}: { generation: 2, - root: &Node{Key: []byte{2}}, + root: &Node{Key: []byte{2}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {2}: {}, }, @@ -72,16 +72,16 @@ func Test_Trie_Snapshot(t *testing.T) { expectedTrie := &Trie{ generation: 9, - root: &Node{Key: []byte{8}}, + root: &Node{Key: []byte{8}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1}: { generation: 2, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{}, }, {2}: { generation: 3, - root: &Node{Key: []byte{2}}, + root: &Node{Key: []byte{2}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{}, }, }, @@ -221,11 +221,11 @@ func Test_Trie_DeepCopy(t *testing.T) { "filled trie": { trieOriginal: &Trie{ generation: 1, - root: &Node{Key: []byte{1, 2}}, + root: &Node{Key: []byte{1, 2}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1, 2, 3}: { generation: 2, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {1, 2, 3}: {}, {3, 4, 5}: {}, @@ -239,11 +239,11 @@ func Test_Trie_DeepCopy(t *testing.T) { }, trieCopy: &Trie{ generation: 1, - root: &Node{Key: []byte{1, 2}}, + root: &Node{Key: []byte{1, 2}, Value: []byte{1}}, childTries: map[common.Hash]*Trie{ {1, 2, 3}: { generation: 2, - root: &Node{Key: []byte{1}}, + root: &Node{Key: []byte{1}, Value: []byte{1}}, deletedKeys: map[common.Hash]struct{}{ {1, 2, 3}: {}, {3, 4, 5}: {}, @@ -277,11 +277,13 @@ func Test_Trie_RootNode(t *testing.T) { trie := Trie{ root: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, } expectedRoot := &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, } root := trie.RootNode() @@ -330,7 +332,8 @@ func Test_encodeRoot(t *testing.T) { }, "root encoding error": { root: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, bufferCalls: bufferCalls{ writeCalls: []writeCall{ @@ -343,27 +346,30 @@ func Test_encodeRoot(t *testing.T) { errWrapped: errTest, errMessage: "cannot encode header: test error", expectedRoot: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, }, "root encoding success": { root: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, bufferCalls: bufferCalls{ writeCalls: []writeCall{ {written: []byte{66}}, {written: []byte{18}}, - {written: []byte{0}}, + {written: []byte{4, 1}}, }, lenCall: true, lenReturn: 3, bytesCall: true, - bytesReturn: []byte{66, 18, 0}, + bytesReturn: []byte{66, 18, 4, 1}, }, expectedRoot: &Node{ Key: []byte{1, 2}, - Encoding: []byte{66, 18, 0}, + Value: []byte{1}, + Encoding: []byte{66, 18, 4}, }, }, } @@ -446,18 +452,20 @@ func Test_Trie_Hash(t *testing.T) { "leaf root": { trie: Trie{ root: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, }, hash: common.Hash{ - 0x84, 0x7c, 0x95, 0x42, 0x8d, 0x9c, 0xcf, 0xce, - 0xa7, 0x27, 0x15, 0x33, 0x48, 0x74, 0x99, 0x11, - 0x83, 0xb8, 0xe8, 0xc4, 
0x80, 0x88, 0xea, 0x4d, - 0x9f, 0x57, 0x82, 0x94, 0xc9, 0x76, 0xf4, 0x6f}, + 0xa8, 0x13, 0x7c, 0xee, 0xb4, 0xad, 0xea, 0xac, + 0x9e, 0x5b, 0x37, 0xe2, 0x8e, 0x7d, 0x64, 0x78, + 0xac, 0xba, 0xb0, 0x6e, 0x90, 0x76, 0xe4, 0x67, + 0xa1, 0xd8, 0xa2, 0x29, 0x4e, 0x4a, 0xd9, 0xa3}, expectedTrie: Trie{ root: &Node{ Key: []byte{1, 2, 3}, - Encoding: []byte{67, 1, 35, 0}, + Value: []byte{1}, + Encoding: []byte{0x43, 0x01, 0x23, 0x04, 0x01}, }, }, }, @@ -468,15 +476,15 @@ func Test_Trie_Hash(t *testing.T) { Value: []byte("branch"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{9}}, + {Key: []byte{9}, Value: []byte{1}}, }), }, }, hash: common.Hash{ - 0xbc, 0x4b, 0x90, 0x4c, 0x65, 0xb1, 0x3b, 0x9b, - 0xcf, 0xe2, 0x32, 0xe3, 0xe6, 0x50, 0x20, 0xd8, - 0x21, 0x96, 0xce, 0xbf, 0x4c, 0xa4, 0xd, 0xaa, - 0xbe, 0x27, 0xab, 0x13, 0xcb, 0xf0, 0xfd, 0xd7}, + 0xaa, 0x7e, 0x57, 0x48, 0xb0, 0x27, 0x4d, 0x18, + 0xf5, 0x1c, 0xfd, 0x36, 0x4c, 0x4b, 0x56, 0x4a, + 0xf5, 0x37, 0x9d, 0xd7, 0xcb, 0xf5, 0x80, 0x15, + 0xf0, 0xe, 0xd3, 0x39, 0x48, 0x21, 0xe3, 0xdd}, expectedTrie: Trie{ root: &Node{ Key: []byte{1, 2, 3}, @@ -485,7 +493,8 @@ func Test_Trie_Hash(t *testing.T) { Children: padRightChildren([]*Node{ { Key: []byte{9}, - Encoding: []byte{0x41, 0x09, 0x00}, + Value: []byte{1}, + Encoding: []byte{0x41, 0x09, 0x04, 0x01}, }, }), }, @@ -671,7 +680,8 @@ func Test_Trie_NextKey(t *testing.T) { "nil key returns root leaf": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, nextKey: []byte{2}, @@ -679,7 +689,8 @@ func Test_Trie_NextKey(t *testing.T) { "key smaller than root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{0x10}, // 10 => [1, 0] in nibbles @@ -717,7 +728,8 @@ func Test_nextKey(t *testing.T) { "nil key returns root leaf": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, nextKey: []byte{2}, @@ -725,7 +737,8 @@ func Test_nextKey(t *testing.T) { "key smaller than root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{1}, @@ -734,7 +747,8 @@ func Test_nextKey(t *testing.T) { "key equal to root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{2}, @@ -742,7 +756,8 @@ func Test_nextKey(t *testing.T) { "key greater than root leaf full key": { trie: Trie{ root: &Node{ - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }, key: []byte{3}, @@ -755,7 +770,8 @@ func Test_nextKey(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }), }, @@ -771,7 +787,8 @@ func Test_nextKey(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }), }, @@ -788,7 +805,8 @@ func Test_nextKey(t *testing.T) { nil, nil, { // full key [1, 2, 3] - Key: []byte{3}, + Key: []byte{3}, + Value: []byte{1}, }, }), }, @@ -806,7 +824,8 @@ func Test_nextKey(t *testing.T) { nil, nil, { // full key [1, 2, 3] - Key: []byte{3}, + Key: []byte{3}, + Value: []byte{1}, }, }), }, @@ -823,7 +842,8 @@ func Test_nextKey(t *testing.T) { nil, nil, { // full key [1, 2, 3] - Key: []byte{3}, + Key: []byte{3}, + Value: []byte{1}, }, }), }, @@ -945,7 +965,7 @@ func Test_nextKey(t *testing.T) { Value: []byte("branch"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, 
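					// As elsewhere in this patch, the leaf fixture gains a
					// Value, since a valueless leaf cannot exist per the
					// specification.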
}), }, }, @@ -1170,7 +1190,7 @@ func Test_Trie_insert(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{1, 0}, @@ -1188,7 +1208,7 @@ func Test_Trie_insert(t *testing.T) { Generation: 1, Dirty: true, }, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, nodesCreated: 1, @@ -1290,7 +1310,8 @@ func Test_Trie_insert(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{1}, value: []byte("leaf"), @@ -1306,7 +1327,8 @@ func Test_Trie_insert(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, key: []byte{1}, value: []byte("leaf"), @@ -1320,6 +1342,7 @@ func Test_Trie_insert(t *testing.T) { nil, nil, { Key: []byte{}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -1362,7 +1385,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte("old"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{2}, @@ -1373,7 +1396,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }, @@ -1383,7 +1406,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte("old"), Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{2}, @@ -1394,7 +1417,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }, @@ -1404,7 +1427,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{2, 3, 4, 5}, @@ -1415,7 +1438,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, nil, nil, { Key: []byte{4, 5}, @@ -1437,7 +1460,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Key: []byte{4}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }), @@ -1456,7 +1479,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, nil, nil, nil, nil, { Key: []byte{6}, @@ -1475,7 +1498,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{2, 4, 5, 6}, @@ -1492,7 +1515,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, { @@ -1510,7 +1533,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{3}, @@ -1527,7 +1550,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, { @@ -1545,7 +1568,7 @@ func Test_Trie_insertInBranch(t *testing.T) 
{ Value: []byte{5}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{}, @@ -1563,7 +1586,7 @@ func Test_Trie_insertInBranch(t *testing.T) { Dirty: true, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }), @@ -1732,15 +1755,18 @@ func Test_Trie_GetKeysWithPrefix(t *testing.T) { Descendants: 2, Children: padRightChildren([]*Node{ { // full key 0, 1, 0, 0, 4 - Key: []byte{4}, + Key: []byte{4}, + Value: []byte{1}, }, { // full key 0, 1, 0, 1, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, }), }, { // full key 0, 1, 1, 9 - Key: []byte{9}, + Key: []byte{9}, + Value: []byte{1}, }, }), }, @@ -1785,8 +1811,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1801,8 +1827,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1817,8 +1843,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, key: []byte{1, 3}, @@ -1830,8 +1856,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, key: []byte{1, 2, 3}, @@ -1843,8 +1869,8 @@ func Test_getKeysWithPrefix(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1855,7 +1881,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with search key equal to common prefix": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 2, 3}, @@ -1865,7 +1892,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with empty search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{}, @@ -1875,7 +1903,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with too deep search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 2, 3, 4}, @@ -1884,7 +1913,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with shorter matching search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 2}, @@ -1894,7 +1924,8 @@ func Test_getKeysWithPrefix(t *testing.T) { }, "parent leaf with not matching search key": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, key: []byte{1, 3, 3}, @@ -1931,7 +1962,8 @@ func Test_addAllKeys(t *testing.T) { }, "leaf parent": { parent: &Node{ - Key: []byte{1, 2, 3}, + Key: []byte{1, 2, 
3}, + Value: []byte{1}, }, prefix: []byte{9, 8, 7}, keys: [][]byte{{1}, {2}}, @@ -1943,8 +1975,8 @@ func Test_addAllKeys(t *testing.T) { Key: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -1959,8 +1991,8 @@ func Test_addAllKeys(t *testing.T) { Value: []byte{}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{4}}, - {Key: []byte{5}}, + {Key: []byte{4}, Value: []byte{1}}, + {Key: []byte{5}, Value: []byte{1}}, }), }, prefix: []byte{9, 8, 7}, @@ -2005,7 +2037,7 @@ func Test_Trie_Get(t *testing.T) { Value: []byte{1, 2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, { // full key 0, 1, 1, 9 @@ -2064,7 +2096,7 @@ func Test_retrieve(t *testing.T) { Value: []byte{2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{1}, @@ -2076,7 +2108,7 @@ func Test_retrieve(t *testing.T) { Value: []byte{2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, value: []byte{2}, @@ -2087,7 +2119,7 @@ func Test_retrieve(t *testing.T) { Value: []byte{2}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, key: []byte{1}, @@ -2159,7 +2191,8 @@ func Test_Trie_ClearPrefixLimit(t *testing.T) { Children: padRightChildren([]*Node{ nil, nil, nil, { - Key: []byte{4}, + Key: []byte{4}, + Value: []byte{1}, }, }), }, @@ -2209,7 +2242,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { }, "leaf parent with common prefix": { parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1}, limit: 1, @@ -2219,7 +2253,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { }, "leaf parent with key equal prefix": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1}, limit: 1, @@ -2232,12 +2267,14 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1, 3}, limit: 1, newParent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, allDeleted: true, }, @@ -2246,12 +2283,14 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1, 2}, limit: 1, newParent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, allDeleted: true, }, @@ -2260,8 +2299,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2275,8 +2314,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2293,8 +2332,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 
3}, @@ -2303,8 +2342,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1, 2}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2317,8 +2356,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 2, 3}, @@ -2327,8 +2366,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2341,8 +2380,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2351,8 +2390,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2363,7 +2402,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2378,7 +2417,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2396,7 +2435,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 3}, @@ -2406,7 +2445,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2420,7 +2459,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 2, 3}, @@ -2430,7 +2469,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2444,7 +2483,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, @@ -2454,7 +2493,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, allDeleted: true, @@ -2468,8 +2507,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2482,7 +2521,7 @@ func Test_Trie_clearPrefixLimit(t 
*testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{4}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, valuesDeleted: 1, @@ -2494,7 +2533,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, @@ -2517,8 +2556,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2537,8 +2576,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1}, @@ -2562,12 +2601,14 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 3, 0, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, }), }, { - Key: []byte{6}, + Key: []byte{6}, + Value: []byte{1}, }, }), }, @@ -2587,7 +2628,8 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Generation: 1, }, { - Key: []byte{6}, + Key: []byte{6}, + Value: []byte{1}, // Not modified so same generation as before }, }), @@ -2609,7 +2651,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, }), }, }), @@ -2634,14 +2676,15 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0, 3}, limit: 3, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -2657,14 +2700,15 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, limit: 3, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -2681,7 +2725,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, @@ -2690,7 +2734,7 @@ func Test_Trie_clearPrefixLimit(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, }, @@ -2733,17 +2777,21 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, newNode: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }, "nil parent": { limit: 1, }, "delete leaf": { - parent: &Node{}, + parent: &Node{ + Value: []byte{1}, + }, limit: 2, valuesDeleted: 1, nodesRemoved: 1, @@ -2778,8 +2826,8 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Key: []byte{3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, limit: 10, @@ -2795,8 +2843,8 @@ func 
Test_Trie_deleteNodesLimit(t *testing.T) { Value: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, limit: 1, @@ -2808,7 +2856,7 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, valuesDeleted: 1, @@ -2823,8 +2871,8 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Value: []byte{1, 2, 3}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{1}}, - {Key: []byte{2}}, + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, limit: 2, @@ -2846,17 +2894,18 @@ func Test_Trie_deleteNodesLimit(t *testing.T) { Descendants: 3, Children: padRightChildren([]*Node{ nil, - {Key: []byte{1}}, + {Key: []byte{1}, Value: []byte{1}}, nil, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, nil, - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, prefix: []byte{1, 2}, limit: 2, newNode: &Node{ Key: []byte{3, 5, 3}, + Value: []byte{1}, Generation: 1, Dirty: true, }, @@ -2894,12 +2943,12 @@ func Test_Trie_ClearPrefix(t *testing.T) { }{ "nil prefix": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, }, "empty prefix": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, prefix: []byte{}, }, @@ -2913,14 +2962,16 @@ func Test_Trie_ClearPrefix(t *testing.T) { Descendants: 3, Children: padRightChildren([]*Node{ { // full key in nibbles 1, 2, 0, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, { // full key in nibbles 1, 2, 1, 6 Key: []byte{6}, Value: []byte("bottom branch"), Children: padRightChildren([]*Node{ { // full key in nibbles 1, 2, 1, 6, 0, 7 - Key: []byte{7}, + Key: []byte{7}, + Value: []byte{1}, }, }), }, @@ -2931,6 +2982,7 @@ func Test_Trie_ClearPrefix(t *testing.T) { expectedTrie: Trie{ root: &Node{ Key: []byte{1, 2, 0, 5}, + Value: []byte{1}, Dirty: true, }, }, @@ -2975,13 +3027,14 @@ func Test_Trie_clearPrefix(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -2990,14 +3043,16 @@ func Test_Trie_clearPrefix(t *testing.T) { "nil parent": {}, "leaf parent with common prefix": { parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1}, nodesRemoved: 1, }, "leaf parent with key equal prefix": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1}, nodesRemoved: 1, @@ -3007,11 +3062,13 @@ func Test_Trie_clearPrefix(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, prefix: []byte{1, 3}, newParent: &Node{ - Key: []byte{1, 2}, + Key: []byte{1, 2}, + Value: []byte{1}, }, }, "leaf parent with key smaller than prefix": { @@ -3019,11 +3076,13 @@ func Test_Trie_clearPrefix(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, prefix: []byte{1, 2}, newParent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }, "branch parent with common prefix": { @@ -3125,8 +3184,8 @@ func Test_Trie_clearPrefix(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: 
padRightChildren([]*Node{ - {Key: []byte{3}}, - {Key: []byte{4}}, + {Key: []byte{3}, Value: []byte{1}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, prefix: []byte{1, 0, 3}, @@ -3138,7 +3197,7 @@ func Test_Trie_clearPrefix(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{4}}, + {Key: []byte{4}, Value: []byte{1}}, }), }, nodesRemoved: 1, @@ -3152,7 +3211,7 @@ func Test_Trie_clearPrefix(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, + {Key: []byte{3}, Value: []byte{1}}, }), }, prefix: []byte{1, 0}, @@ -3179,7 +3238,8 @@ func Test_Trie_clearPrefix(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 3, 0, 5 - Key: []byte{5}, + Key: []byte{5}, + Value: []byte{1}, }, }), }, @@ -3211,13 +3271,14 @@ func Test_Trie_clearPrefix(t *testing.T) { Key: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{3}}, // full key 1, 0, 3 - {Key: []byte{4}}, // full key 1, 1, 4 + {Key: []byte{3}, Value: []byte{1}}, // full key 1, 0, 3 + {Key: []byte{4}, Value: []byte{1}}, // full key 1, 1, 4 }), }, prefix: []byte{1, 0, 3}, newParent: &Node{ Key: []byte{1, 1, 4}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -3253,12 +3314,12 @@ func Test_Trie_Delete(t *testing.T) { }{ "nil key": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, }, "empty key": { trie: Trie{ - root: &Node{}, + root: &Node{Value: []byte{1}}, }, }, "empty trie": { @@ -3350,14 +3411,16 @@ func Test_Trie_delete(t *testing.T) { }, "leaf parent and nil key": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, updated: true, nodesRemoved: 1, }, "leaf parent and empty key": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{}, updated: true, @@ -3365,7 +3428,8 @@ func Test_Trie_delete(t *testing.T) { }, "leaf parent matches key": { parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{1}, updated: true, @@ -3376,11 +3440,13 @@ func Test_Trie_delete(t *testing.T) { generation: 1, }, parent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, key: []byte{2}, newParent: &Node{ - Key: []byte{1}, + Key: []byte{1}, + Value: []byte{1}, }, }, "branch parent and nil key": { @@ -3393,12 +3459,14 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, newParent: &Node{ Key: []byte{1, 0, 2}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -3414,12 +3482,13 @@ func Test_Trie_delete(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{}, newParent: &Node{ Key: []byte{1, 0, 2}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -3435,12 +3504,13 @@ func Test_Trie_delete(t *testing.T) { Value: []byte{1}, Descendants: 1, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{1}, newParent: &Node{ Key: []byte{1, 0, 2}, + Value: []byte{1}, Dirty: true, Generation: 1, }, @@ -3457,7 +3527,8 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 2 - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, @@ -3503,7 +3574,8 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 2 - Key: 
[]byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, @@ -3514,7 +3586,8 @@ func Test_Trie_delete(t *testing.T) { Descendants: 1, Children: padRightChildren([]*Node{ { // full key 1, 0, 2 - Key: []byte{2}, + Key: []byte{2}, + Value: []byte{1}, }, }), }, @@ -3556,8 +3629,8 @@ func Test_Trie_delete(t *testing.T) { Value: []byte{1}, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, key: []byte{1}, @@ -3567,8 +3640,8 @@ func Test_Trie_delete(t *testing.T) { Dirty: true, Descendants: 2, Children: padRightChildren([]*Node{ - {Key: []byte{2}}, - {Key: []byte{2}}, + {Key: []byte{2}, Value: []byte{1}}, + {Key: []byte{2}, Value: []byte{1}}, }), }, updated: true, @@ -3666,7 +3739,7 @@ func Test_handleDeletion(t *testing.T) { Generation: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{9}}, + {Key: []byte{9}, Value: []byte{1}}, }), }, newNode: &Node{ @@ -3675,7 +3748,7 @@ func Test_handleDeletion(t *testing.T) { Generation: 1, Children: padRightChildren([]*Node{ nil, - {Key: []byte{9}}, + {Key: []byte{9}, Value: []byte{1}}, }), }, }, @@ -3710,9 +3783,9 @@ func Test_handleDeletion(t *testing.T) { Key: []byte{9}, Value: []byte{10}, Children: padRightChildren([]*Node{ - {Key: []byte{7}}, + {Key: []byte{7}, Value: []byte{1}}, nil, - {Key: []byte{8}}, + {Key: []byte{8}, Value: []byte{1}}, }), }, }), @@ -3723,9 +3796,9 @@ func Test_handleDeletion(t *testing.T) { Generation: 1, Dirty: true, Children: padRightChildren([]*Node{ - {Key: []byte{7}}, + {Key: []byte{7}, Value: []byte{1}}, nil, - {Key: []byte{8}}, + {Key: []byte{8}, Value: []byte{1}}, }), }, branchChildMerged: true, From c6e9c8004694d9d060c52f3be85a3b851d89bf6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Mon, 4 Jul 2022 16:51:07 -0400 Subject: [PATCH 31/48] (lib/grandpa): verify justification hash relates to block being justified (#2619) * feat: compare the justification hash with the given hash Co-authored-by: Quentin McGaw --- lib/common/hash.go | 6 ++++++ lib/grandpa/errors.go | 3 ++- lib/grandpa/message_handler.go | 5 +++++ lib/grandpa/message_handler_test.go | 15 +++++++++++++++ 4 files changed, 28 insertions(+), 1 deletion(-) diff --git a/lib/common/hash.go b/lib/common/hash.go index 6ca442e829..0c33b93699 100644 --- a/lib/common/hash.go +++ b/lib/common/hash.go @@ -64,6 +64,12 @@ func (h Hash) String() string { return fmt.Sprintf("0x%x", h[:]) } +// Short returns the first 4 bytes and the last 4 bytes of the hex string for the hash +func (h Hash) Short() string { + const nBytes = 4 + return fmt.Sprintf("0x%x...%x", h[:nBytes], h[len(h)-nBytes:]) +} + // SetBytes sets the hash to the value of b. // If b is larger than len(h), b will be cropped from the left. func (h *Hash) SetBytes(b []byte) { diff --git a/lib/grandpa/errors.go b/lib/grandpa/errors.go index 1908f785cf..c2d2435c29 100644 --- a/lib/grandpa/errors.go +++ b/lib/grandpa/errors.go @@ -61,7 +61,8 @@ var ( ErrNotCommitMessage = errors.New("cannot get finalised hash from VoteMessage") // ErrNoJustification is returned when no justification can be found for a block, ie. 
it has not been finalised - ErrNoJustification = errors.New("no justification found for block") + ErrNoJustification = errors.New("no justification found for block") + ErrJustificationMismatch = errors.New("justification does not correspond to given block hash") ErrBlockHashMismatch = errors.New("block hash does not correspond to given block number") diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index 766ca26b29..79dc9b101c 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -562,6 +562,11 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt return nil, err } + if !hash.Equal(fj.Commit.Hash) { + return nil, fmt.Errorf("%w: justification %s and block hash %s", + ErrJustificationMismatch, fj.Commit.Hash.Short(), hash.Short()) + } + setID, err := s.grandpaState.GetSetIDByBlockNumber(uint(fj.Commit.Number)) if err != nil { return nil, fmt.Errorf("cannot get set ID from block number: %w", err) diff --git a/lib/grandpa/message_handler_test.go b/lib/grandpa/message_handler_test.go index 2f43675ed0..0f378b7817 100644 --- a/lib/grandpa/message_handler_test.go +++ b/lib/grandpa/message_handler_test.go @@ -5,6 +5,7 @@ package grandpa import ( "errors" + "fmt" "testing" "time" @@ -789,6 +790,20 @@ func TestMessageHandler_VerifyBlockJustification_invalid(t *testing.T) { returnedJust, err = gs.VerifyBlockJustification(testHash, data) require.Equal(t, ErrMinVotesNotMet, err) require.Nil(t, returnedJust) + + // mismatch justification header and block header + precommits = buildTestJustification(t, 1, round+1, setID, kr, precommit) + just = newJustification(round+1, testHash, number, precommits) + data, err = scale.Marshal(*just) + require.NoError(t, err) + otherHeader := types.NewEmptyHeader() + _, err = gs.VerifyBlockJustification(otherHeader.Hash(), data) + require.ErrorIs(t, err, ErrJustificationMismatch) + + expectedErr := fmt.Sprintf("%s: justification %s and block hash %s", ErrJustificationMismatch, + testHash.Short(), otherHeader.Hash().Short()) + assert.ErrorIs(t, err, ErrJustificationMismatch) + require.EqualError(t, err, expectedErr) } func Test_getEquivocatoryVoters(t *testing.T) { From b09eb07c025a6f5587e8370a3511057855a607ff Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Tue, 5 Jul 2022 09:47:49 -0400 Subject: [PATCH 32/48] feat(trie): decode all inlined node variants (#2611) --- internal/trie/node/decode.go | 11 +-- internal/trie/node/decode_test.go | 152 ++++++++++++++++-------------- lib/trie/database.go | 2 +- 3 files changed, 84 insertions(+), 81 deletions(-) diff --git a/internal/trie/node/decode.go b/internal/trie/node/decode.go index 2dac9d3eeb..a74994b0da 100644 --- a/internal/trie/node/decode.go +++ b/internal/trie/node/decode.go @@ -110,14 +110,11 @@ func decodeBranch(reader io.Reader, variant byte, partialKeyLength uint16) ( if len(hash) < hashLength { // Handle inlined nodes reader = bytes.NewReader(hash) - variant, partialKeyLength, err := decodeHeader(reader) - if err == nil && variant == leafVariant.bits { - childNode, err = decodeLeaf(reader, partialKeyLength) - if err != nil { - return nil, fmt.Errorf("%w: at index %d: %s", - ErrDecodeValue, i, err) - } + childNode, err = Decode(reader) + if err != nil { + return nil, fmt.Errorf("decoding inlined child at index %d: %w", i, err) } + node.Descendants += childNode.Descendants } node.Descendants++ diff --git a/internal/trie/node/decode_test.go b/internal/trie/node/decode_test.go index 2e8e0967e2..9cf9979dda 100644 --- 
a/internal/trie/node/decode_test.go +++ b/internal/trie/node/decode_test.go @@ -14,6 +14,10 @@ import ( ) func scaleEncodeBytes(t *testing.T, b ...byte) (encoded []byte) { + return scaleEncodeByteSlice(t, b) +} + +func scaleEncodeByteSlice(t *testing.T, b []byte) (encoded []byte) { encoded, err := scale.Marshal(b) require.NoError(t, err) return encoded @@ -98,66 +102,6 @@ func Test_Decode(t *testing.T) { Dirty: true, }, }, - "branch with two inlined children": { - reader: bytes.NewReader( - []byte{ - branchVariant.bits | 30, // key length 30 - // Key data start - 195, 101, 195, 207, 89, 214, - 113, 235, 114, 218, 14, 122, - 65, 19, 196, 16, 2, 80, 95, - 14, 123, 144, 18, 9, 107, - 65, 196, 235, 58, 175, - // Key data end - 148, 127, 110, 164, 41, 8, 0, 0, 104, 95, 15, 31, 5, - 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, - 187, 32, 134, 92, 74, 43, 127, 1, 0, 0, - }, - ), - n: &Node{ - Key: []byte{ - 12, 3, 6, 5, 12, 3, - 12, 15, 5, 9, 13, 6, - 7, 1, 14, 11, 7, 2, - 13, 10, 0, 14, 7, 10, - 4, 1, 1, 3, 12, 4, - }, - Descendants: 2, - Children: []*Node{ - nil, nil, nil, nil, - { - Key: []byte{ - 14, 7, 11, 9, 0, 1, - 2, 0, 9, 6, 11, 4, - 1, 12, 4, 14, 11, - 3, 10, 10, 15, 9, - 4, 7, 15, 6, 14, - 10, 4, 2, 9, - }, - Value: []byte{0, 0}, - Dirty: true, - }, - nil, nil, nil, nil, - { - Key: []byte{ - 15, 1, 15, 0, 5, 1, - 5, 15, 4, 6, 2, 12, - 13, 12, 15, 8, 4, - 14, 0, 15, 1, 13, - 6, 0, 4, 5, 13, - 15, 12, 11, 11, - }, - Value: []byte{ - 134, 92, 74, 43, - 127, 1, 0, 0, - }, - Dirty: true, - }, - nil, nil, nil, nil, nil, nil, - }, - Dirty: true, - }, - }, } for name, testCase := range testCases { @@ -179,6 +123,13 @@ func Test_Decode(t *testing.T) { func Test_decodeBranch(t *testing.T) { t.Parallel() + const childHashLength = 32 + childHash := make([]byte, childHashLength) + for i := range childHash { + childHash[i] = byte(i) + } + scaleEncodedChildHash := scaleEncodeByteSlice(t, childHash) + testCases := map[string]struct { reader io.Reader variant byte @@ -220,9 +171,9 @@ func Test_decodeBranch(t *testing.T) { "success for branch variant": { reader: bytes.NewBuffer( concatByteSlices([][]byte{ - {9}, // key data - {0, 4}, // children bitmap - scaleEncodeBytes(t, 1, 2, 3, 4, 5), // child hash + {9}, // key data + {0, 4}, // children bitmap + scaleEncodedChildHash, }), ), variant: branchVariant.bits, @@ -233,7 +184,7 @@ func Test_decodeBranch(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, { - HashDigest: []byte{1, 2, 3, 4, 5}, + HashDigest: childHash, Dirty: true, }, }), @@ -255,14 +206,12 @@ func Test_decodeBranch(t *testing.T) { errMessage: "cannot decode value: EOF", }, "success for branch with value": { - reader: bytes.NewBuffer( - concatByteSlices([][]byte{ - {9}, // key data - {0, 4}, // children bitmap - scaleEncodeBytes(t, 7, 8, 9), // branch value - scaleEncodeBytes(t, 1, 2, 3, 4, 5), // child hash - }), - ), + reader: bytes.NewBuffer(concatByteSlices([][]byte{ + {9}, // key data + {0, 4}, // children bitmap + scaleEncodeBytes(t, 7, 8, 9), // branch value + scaleEncodedChildHash, + })), variant: branchWithValueVariant.bits, partialKeyLength: 1, branch: &Node{ @@ -272,7 +221,7 @@ func Test_decodeBranch(t *testing.T) { nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, { - HashDigest: []byte{1, 2, 3, 4, 5}, + HashDigest: childHash, Dirty: true, }, }), @@ -280,6 +229,63 @@ func Test_decodeBranch(t *testing.T) { Descendants: 1, }, }, + "branch with inlined node decoding error": { + reader: bytes.NewBuffer(concatByteSlices([][]byte{ + {1}, // key data + 
{0b0000_0001, 0b0000_0000}, // children bitmap + scaleEncodeBytes(t, 1), // branch value + {0}, // garbage inlined node + })), + variant: branchWithValueVariant.bits, + partialKeyLength: 1, + errWrapped: io.EOF, + errMessage: "decoding inlined child at index 0: " + + "decoding header: reading header byte: EOF", + }, + "branch with inlined branch and leaf": { + reader: bytes.NewBuffer(concatByteSlices([][]byte{ + {1}, // key data + {0b0000_0011, 0b0000_0000}, // children bitmap + // top level inlined leaf less than 32 bytes + scaleEncodeByteSlice(t, concatByteSlices([][]byte{ + {leafVariant.bits | 1}, // partial key length of 1 + {2}, // key data + scaleEncodeBytes(t, 2), // value data + })), + // top level inlined branch less than 32 bytes + scaleEncodeByteSlice(t, concatByteSlices([][]byte{ + {branchWithValueVariant.bits | 1}, // partial key length of 1 + {3}, // key data + {0b0000_0001, 0b0000_0000}, // children bitmap + scaleEncodeBytes(t, 3), // branch value + // bottom level leaf + scaleEncodeByteSlice(t, concatByteSlices([][]byte{ + {leafVariant.bits | 1}, // partial key length of 1 + {4}, // key data + scaleEncodeBytes(t, 4), // value data + })), + })), + })), + variant: branchVariant.bits, + partialKeyLength: 1, + branch: &Node{ + Key: []byte{1}, + Descendants: 3, + Children: padRightChildren([]*Node{ + {Key: []byte{2}, Value: []byte{2}, Dirty: true}, + { + Key: []byte{3}, + Value: []byte{3}, + Dirty: true, + Descendants: 1, + Children: padRightChildren([]*Node{ + {Key: []byte{4}, Value: []byte{4}, Dirty: true}, + }), + }, + }), + Dirty: true, + }, + }, } for name, testCase := range testCases { diff --git a/lib/trie/database.go b/lib/trie/database.go index 4c608b9d1c..2a676882b4 100644 --- a/lib/trie/database.go +++ b/lib/trie/database.go @@ -182,7 +182,7 @@ func (t *Trie) load(db chaindb.Database, n *Node) error { hash := child.HashDigest - if len(hash) == 0 && child.Type() == node.Leaf { + if len(hash) == 0 { // node has already been loaded inline // just set encoding + hash digest _, _, err := child.EncodeAndHash(false) From 3ab76bc29a10efc682687e86438960eee2936a04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Tue, 5 Jul 2022 15:04:41 -0400 Subject: [PATCH 33/48] fix(state/grandpa): track changes across forks (#2519) * feat: tracking grandpa messages across forks Co-authored-by: Quentin McGaw Co-authored-by: jimboj --- dot/core/service.go | 4 +- dot/digest/digest.go | 302 ++----- dot/digest/digest_test.go | 360 +++++---- dot/digest/interface.go | 5 + dot/digest/mock_grandpa_test.go | 150 ++++ dot/mock_node_builder_test.go | 8 +- dot/node.go | 4 +- dot/node_test.go | 2 +- dot/rpc/modules/system.go | 2 +- dot/services.go | 21 +- dot/services_integration_test.go | 5 +- dot/services_test.go | 2 +- dot/state/grandpa.go | 275 ++++++- dot/state/grandpa_changes.go | 316 ++++++++ dot/state/grandpa_test.go | 1252 +++++++++++++++++++++++++++++- dot/state/initialize.go | 2 +- dot/state/service.go | 6 +- dot/state/service_test.go | 6 +- dot/types/consensus_digest.go | 8 +- dot/types/digest.go | 4 + go.mod | 3 +- go.sum | 16 +- lib/grandpa/grandpa.go | 67 +- lib/grandpa/grandpa_test.go | 27 +- lib/grandpa/mocks_test.go | 15 + lib/grandpa/round_test.go | 19 +- lib/grandpa/state.go | 6 +- lib/grandpa/vote_message_test.go | 115 ++- 28 files changed, 2412 insertions(+), 590 deletions(-) create mode 100644 dot/digest/mock_grandpa_test.go create mode 100644 dot/state/grandpa_changes.go diff --git a/dot/core/service.go b/dot/core/service.go index e4bc6d846a..cb2552eefb 
100644 --- a/dot/core/service.go +++ b/dot/core/service.go @@ -22,8 +22,8 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/ChainSafe/gossamer/lib/services" "github.com/ChainSafe/gossamer/lib/transaction" - cscale "github.com/centrifuge/go-substrate-rpc-client/v3/scale" - ctypes "github.com/centrifuge/go-substrate-rpc-client/v3/types" + cscale "github.com/centrifuge/go-substrate-rpc-client/v4/scale" + ctypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) var ( diff --git a/dot/digest/digest.go b/dot/digest/digest.go index 631be163f8..2d6d8203fa 100644 --- a/dot/digest/digest.go +++ b/dot/digest/digest.go @@ -18,6 +18,10 @@ var ( _ services.Service = &Handler{} ) +var ( + ErrUnknownConsensusEngineID = errors.New("unknown consensus engine ID") +) + // Handler is used to handle consensus messages and relevant authority updates to BABE and GRANDPA type Handler struct { ctx context.Context @@ -32,28 +36,9 @@ type Handler struct { imported chan *types.Block finalised chan *types.FinalisationInfo - // GRANDPA changes - grandpaScheduledChange *grandpaChange - grandpaForcedChange *grandpaChange - grandpaPause *pause - grandpaResume *resume - logger log.LeveledLogger } -type grandpaChange struct { - auths []types.Authority - atBlock uint -} - -type pause struct { - atBlock uint -} - -type resume struct { - atBlock uint -} - // NewHandler returns a new Handler func NewHandler(lvl log.Level, blockState BlockState, epochState EpochState, grandpaState GrandpaState) (*Handler, error) { @@ -91,44 +76,80 @@ func (h *Handler) Stop() error { return nil } -// NextGrandpaAuthorityChange returns the block number of the next upcoming grandpa authorities change. -// It returns 0 if no change is scheduled. -func (h *Handler) NextGrandpaAuthorityChange() (next uint) { - next = ^uint(0) - - if h.grandpaScheduledChange != nil { - next = h.grandpaScheduledChange.atBlock +// HandleDigests handles consensus digests for an imported block +func (h *Handler) HandleDigests(header *types.Header) error { + consensusDigests := h.toConsensusDigests(header.Digest.Types) + consensusDigests, err := checkForGRANDPAForcedChanges(consensusDigests) + if err != nil { + return fmt.Errorf("failed while checking GRANDPA digests: %w", err) } - if h.grandpaForcedChange != nil && h.grandpaForcedChange.atBlock < next { - next = h.grandpaForcedChange.atBlock + for i := range consensusDigests { + // avoiding implicit memory aliasing in for loop, since: + // for _, digest := range consensusDigests { &digest } + // is using the address of a loop variable + digest := consensusDigests[i] + err := h.handleConsensusDigest(&digest, header) + if err != nil { + h.logger.Errorf("cannot handle consensus digest: %w", err) + } } - if h.grandpaPause != nil && h.grandpaPause.atBlock < next { - next = h.grandpaPause.atBlock - } + return nil +} + +// toConsensusDigests converts a slice of scale.VaryingDataType to a slice of types.ConsensusDigest. 
+func (h *Handler) toConsensusDigests(scaleVaryingTypes []scale.VaryingDataType) []types.ConsensusDigest { + consensusDigests := make([]types.ConsensusDigest, 0, len(scaleVaryingTypes)) + + for _, d := range scaleVaryingTypes { + digest, ok := d.Value().(types.ConsensusDigest) + if !ok { + h.logger.Debugf("digest type not supported: %T", d.Value()) + continue + } - if h.grandpaResume != nil && h.grandpaResume.atBlock < next { - next = h.grandpaResume.atBlock + switch digest.ConsensusEngineID { + case types.GrandpaEngineID, types.BabeEngineID: + consensusDigests = append(consensusDigests, digest) + } } - return next + return consensusDigests } -// HandleDigests handles consensus digests for an imported block -func (h *Handler) HandleDigests(header *types.Header) { - for i, d := range header.Digest.Types { - val, ok := d.Value().(types.ConsensusDigest) - if !ok { +// checkForGRANDPAForcedChanges removes any GrandpaScheduledChange in the presence of a +// GrandpaForcedChange in the same block digest, returning a new slice of types.ConsensusDigest +func checkForGRANDPAForcedChanges(digests []types.ConsensusDigest) ([]types.ConsensusDigest, error) { + var hasForcedChange bool + digestsWithoutScheduled := make([]types.ConsensusDigest, 0, len(digests)) + for _, digest := range digests { + if digest.ConsensusEngineID != types.GrandpaEngineID { + digestsWithoutScheduled = append(digestsWithoutScheduled, digest) continue } - err := h.handleConsensusDigest(&val, header) + data := types.NewGrandpaConsensusDigest() + err := scale.Unmarshal(digest.Data, &data) if err != nil { - h.logger.Errorf("cannot handle digest for block number %d, index %d, digest %s: %s", - header.Number, i, d.Value(), err) + return nil, fmt.Errorf("cannot unmarshal GRANDPA consensus digest: %w", err) + } + + switch data.Value().(type) { + case types.GrandpaScheduledChange: + case types.GrandpaForcedChange: + hasForcedChange = true + digestsWithoutScheduled = append(digestsWithoutScheduled, digest) + default: + digestsWithoutScheduled = append(digestsWithoutScheduled, digest) } } + + if hasForcedChange { + return digestsWithoutScheduled, nil + } + + return digests, nil } func (h *Handler) handleConsensusDigest(d *types.ConsensusDigest, header *types.Header) error { @@ -139,42 +160,19 @@ func (h *Handler) handleConsensusDigest(d *types.ConsensusDigest, header *types. 
if err != nil { return err } - err = h.handleGrandpaConsensusDigest(data, header) - if err != nil { - return err - } - return nil + + return h.grandpaState.HandleGRANDPADigest(header, data) case types.BabeEngineID: data := types.NewBabeConsensusDigest() err := scale.Unmarshal(d.Data, &data) if err != nil { return err } - err = h.handleBabeConsensusDigest(data, header) - if err != nil { - return err - } - return nil - } - - return errors.New("unknown consensus engine ID") -} -func (h *Handler) handleGrandpaConsensusDigest(digest scale.VaryingDataType, header *types.Header) error { - switch val := digest.Value().(type) { - case types.GrandpaScheduledChange: - return h.handleScheduledChange(val, header) - case types.GrandpaForcedChange: - return h.handleForcedChange(val, header) - case types.GrandpaOnDisabled: - return nil // do nothing, as this is not implemented in substrate - case types.GrandpaPause: - return h.handlePause(val) - case types.GrandpaResume: - return h.handleResume(val) + return h.handleBabeConsensusDigest(data, header) + default: + return fmt.Errorf("%w: 0x%x", ErrUnknownConsensusEngineID, d.ConsensusEngineID.ToBytes()) } - - return errors.New("invalid consensus digest data") } func (h *Handler) handleBabeConsensusDigest(digest scale.VaryingDataType, header *types.Header) error { @@ -194,7 +192,7 @@ func (h *Handler) handleBabeConsensusDigest(digest scale.VaryingDataType, header return nil case types.BABEOnDisabled: - return h.handleBABEOnDisabled(val, header) + return nil case types.NextConfigData: currEpoch, err := h.epochState.GetEpochForBlock(header) @@ -220,10 +218,14 @@ func (h *Handler) handleBlockImport(ctx context.Context) { continue } - h.HandleDigests(&block.Header) - err := h.handleGrandpaChangesOnImport(block.Header.Number) + err := h.HandleDigests(&block.Header) if err != nil { - h.logger.Errorf("failed to handle grandpa changes on block import: %s", err) + h.logger.Errorf("failed to handle digests: %s", err) + } + + err = h.grandpaState.ApplyForcedChanges(&block.Header) + if err != nil { + h.logger.Errorf("failed to apply forced changes: %s", err) } case <-ctx.Done(): return @@ -249,159 +251,13 @@ func (h *Handler) handleBlockFinalisation(ctx context.Context) { h.logger.Errorf("failed to persist babe next epoch config: %s", err) } - err = h.handleGrandpaChangesOnFinalization(info.Header.Number) + err = h.grandpaState.ApplyScheduledChanges(&info.Header) if err != nil { - h.logger.Errorf("failed to handle grandpa changes on block finalisation: %s", err) + h.logger.Errorf("failed to apply scheduled change: %s", err) } + case <-ctx.Done(): return } } } - -func (h *Handler) handleGrandpaChangesOnImport(num uint) error { - resume := h.grandpaResume - if resume != nil && num >= resume.atBlock { - h.grandpaResume = nil - } - - fc := h.grandpaForcedChange - if fc != nil && num >= fc.atBlock { - curr, err := h.grandpaState.IncrementSetID() - if err != nil { - return err - } - - h.grandpaForcedChange = nil - h.logger.Debugf("incremented grandpa set id %d", curr) - } - - return nil -} - -func (h *Handler) handleGrandpaChangesOnFinalization(num uint) error { - pause := h.grandpaPause - if pause != nil && num >= pause.atBlock { - h.grandpaPause = nil - } - - sc := h.grandpaScheduledChange - if sc != nil && num >= sc.atBlock { - curr, err := h.grandpaState.IncrementSetID() - if err != nil { - return err - } - - h.grandpaScheduledChange = nil - h.logger.Debugf("incremented grandpa set id %d", curr) - } - - // if blocks get finalised before forced change takes place, disregard 
it - h.grandpaForcedChange = nil - return nil -} - -func (h *Handler) handleScheduledChange(sc types.GrandpaScheduledChange, header *types.Header) error { - curr, err := h.blockState.BestBlockHeader() - if err != nil { - return err - } - - if h.grandpaScheduledChange != nil { - return nil - } - - h.logger.Debugf("handling GrandpaScheduledChange data: %v", sc) - - c, err := newGrandpaChange(sc.Auths, sc.Delay, curr.Number) - if err != nil { - return err - } - - h.grandpaScheduledChange = c - - auths, err := types.GrandpaAuthoritiesRawToAuthorities(sc.Auths) - if err != nil { - return err - } - h.logger.Debugf("setting GrandpaScheduledChange at block %d", - header.Number+uint(sc.Delay)) - return h.grandpaState.SetNextChange( - types.NewGrandpaVotersFromAuthorities(auths), - header.Number+uint(sc.Delay), - ) -} - -func (h *Handler) handleForcedChange(fc types.GrandpaForcedChange, header *types.Header) error { - if header == nil { - return errors.New("header is nil") - } - - if h.grandpaForcedChange != nil { - return errors.New("already have forced change scheduled") - } - - h.logger.Debugf("handling GrandpaForcedChange with data %v", fc) - - c, err := newGrandpaChange(fc.Auths, fc.Delay, header.Number) - if err != nil { - return err - } - - h.grandpaForcedChange = c - - auths, err := types.GrandpaAuthoritiesRawToAuthorities(fc.Auths) - if err != nil { - return err - } - - h.logger.Debugf("setting GrandpaForcedChange at block %d", - header.Number+uint(fc.Delay)) - return h.grandpaState.SetNextChange( - types.NewGrandpaVotersFromAuthorities(auths), - header.Number+uint(fc.Delay), - ) -} - -func (h *Handler) handlePause(p types.GrandpaPause) error { - curr, err := h.blockState.BestBlockHeader() - if err != nil { - return err - } - - h.grandpaPause = &pause{ - atBlock: curr.Number + uint(p.Delay), - } - - return h.grandpaState.SetNextPause(h.grandpaPause.atBlock) -} - -func (h *Handler) handleResume(r types.GrandpaResume) error { - curr, err := h.blockState.BestBlockHeader() - if err != nil { - return err - } - - h.grandpaResume = &resume{ - atBlock: curr.Number + uint(r.Delay), - } - - return h.grandpaState.SetNextResume(h.grandpaResume.atBlock) -} - -func newGrandpaChange(raw []types.GrandpaAuthoritiesRaw, delay uint32, currBlock uint) (*grandpaChange, error) { - auths, err := types.GrandpaAuthoritiesRawToAuthorities(raw) - if err != nil { - return nil, err - } - - return &grandpaChange{ - auths: auths, - atBlock: currBlock + uint(delay), - }, nil -} - -func (h *Handler) handleBABEOnDisabled(_ types.BABEOnDisabled, _ *types.Header) error { - h.logger.Debug("handling BABEOnDisabled") - return nil -} diff --git a/dot/digest/digest_test.go b/dot/digest/digest_test.go index dcb0d8633b..fa3414d42b 100644 --- a/dot/digest/digest_test.go +++ b/dot/digest/digest_test.go @@ -13,17 +13,20 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/lib/genesis" "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/golang/mock/gomock" + "github.com/gtank/merlin" "github.com/stretchr/testify/require" ) //go:generate mockgen -destination=mock_telemetry_test.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/telemetry Client +//go:generate mockgen -destination=mock_grandpa_test.go -package $GOPACKAGE . 
GrandpaState

 func newTestHandler(t *testing.T) (*Handler, *state.Service) {
 	testDatadirPath := t.TempDir()
@@ -59,6 +62,13 @@ func TestHandler_GrandpaScheduledChange(t *testing.T) {
 	handler.Start()
 	defer handler.Stop()

+	// create 4 blocks and finalize only blocks 0, 1, 2
+	headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 4, false)
+	for i, h := range headers[:3] {
+		err := handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0)
+		require.NoError(t, err)
+	}
+
 	kr, err := keystore.NewEd25519Keyring()
 	require.NoError(t, err)

@@ -66,7 +76,7 @@
 		Auths: []types.GrandpaAuthoritiesRaw{
 			{Key: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), ID: 0},
 		},
-		Delay: 3,
+		Delay: 0,
 	}

 	var digest = types.NewGrandpaConsensusDigest()
@@ -81,25 +91,13 @@
 		Data: data,
 	}

-	header := &types.Header{
-		Number: 1,
-	}
-
-	err = handler.handleConsensusDigest(d, header)
+	// include a GrandpaScheduledChange on block number 3
+	err = handler.handleConsensusDigest(d, headers[3])
 	require.NoError(t, err)

-	headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 2, false)
-	for i, h := range headers {
-		err = handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0)
-		require.NoError(t, err)
-	}
-
-	// authorities should change on start of block 3 from start
-	headers, _ = state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 1, false)
-	for _, h := range headers {
-		err = handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), 3, 0)
-		require.NoError(t, err)
-	}
+	// finalize block number 3
+	err = handler.blockState.(*state.BlockState).SetFinalisedHash(headers[3].Hash(), 3, 0)
+	require.NoError(t, err)

 	time.Sleep(time.Millisecond * 500)
 	setID, err := handler.grandpaState.(*state.GrandpaState).GetCurrentSetID()
@@ -118,6 +116,9 @@ func TestHandler_GrandpaForcedChange(t *testing.T) {
 	handler.Start()
 	defer handler.Stop()

+	// authorities should change at the start of block 4
+	headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 2, false)
+
 	kr, err := keystore.NewEd25519Keyring()
 	require.NoError(t, err)

@@ -140,17 +141,15 @@
 		Data: data,
 	}

-	header := &types.Header{
-		Number: 1,
-	}
-
-	err = handler.handleConsensusDigest(d, header)
+	// track the GrandpaForcedChange under block 1;
+	// when block number 4 is imported the change should be applied
+	err = handler.handleConsensusDigest(d, headers[1])
 	require.NoError(t, err)

-	// authorities should change on start of block 4 from start
+	// create new blocks and import them
 	state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 4, false)

-	time.Sleep(time.Millisecond * 100)
+	time.Sleep(time.Millisecond * 500)
 	setID, err := handler.grandpaState.(*state.GrandpaState).GetCurrentSetID()
 	require.NoError(t, err)
 	require.Equal(t, uint64(1), setID)

 	auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(setID)
 	require.NoError(t, err)
 	expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(fc.Auths)
 	require.NoError(t, err)
 	require.Equal(t, expected, auths)
 }

-func TestHandler_GrandpaPauseAndResume(t *testing.T) {
-	handler, _ := newTestHandler(t)
-	handler.Start()
-	defer handler.Stop()
-
-	p := types.GrandpaPause{
-		Delay: 3,
-	}
-
-	var digest = types.NewGrandpaConsensusDigest()
-	err := digest.Set(p)
-	require.NoError(t, err)
-
-	data, err := scale.Marshal(digest)
-	require.NoError(t, err)
-
-	d := &types.ConsensusDigest{
ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.handleConsensusDigest(d, nil) - require.NoError(t, err) - nextPause, err := handler.grandpaState.(*state.GrandpaState).GetNextPause() - require.NoError(t, err) - require.Equal(t, uint(p.Delay), nextPause) - - headers, _ := state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 3, false) - for i, h := range headers { - handler.blockState.(*state.BlockState).SetFinalisedHash(h.Hash(), uint64(i), 0) - } - - time.Sleep(time.Millisecond * 100) - require.Nil(t, handler.grandpaPause) - - r := types.GrandpaResume{ - Delay: 3, - } - - var digest2 = types.NewGrandpaConsensusDigest() - err = digest2.Set(r) - require.NoError(t, err) - - data, err = scale.Marshal(digest2) - require.NoError(t, err) - - d = &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.handleConsensusDigest(d, nil) - require.NoError(t, err) - - state.AddBlocksToState(t, handler.blockState.(*state.BlockState), 3, false) - time.Sleep(time.Millisecond * 110) - require.Nil(t, handler.grandpaResume) - - nextResume, err := handler.grandpaState.(*state.GrandpaState).GetNextResume() - require.NoError(t, err) - require.Equal(t, uint(r.Delay+p.Delay), nextResume) -} - -func TestNextGrandpaAuthorityChange_OneChange(t *testing.T) { - handler, _ := newTestHandler(t) - handler.Start() - defer handler.Stop() - - const block uint = 3 - sc := types.GrandpaScheduledChange{ - Auths: []types.GrandpaAuthoritiesRaw{}, - Delay: uint32(block), - } - - var digest = types.NewGrandpaConsensusDigest() - err := digest.Set(sc) - require.NoError(t, err) - - data, err := scale.Marshal(digest) - require.NoError(t, err) - - d := &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - header := &types.Header{ - Number: 1, +func TestMultipleGRANDPADigests_ShouldIncludeJustForcedChanges(t *testing.T) { + tests := map[string]struct { + digestsTypes []scale.VaryingDataTypeValue + expectedHandled []scale.VaryingDataTypeValue + }{ + "forced_and_scheduled_changes_same_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + }, + }, + "only_scheduled_change_in_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaScheduledChange{}, + }, + }, + "more_than_one_forced_changes_in_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + types.GrandpaForcedChange{}, + }, + }, + "multiple_consensus_digests_in_block": { + digestsTypes: []scale.VaryingDataTypeValue{ + types.GrandpaOnDisabled{}, + types.GrandpaPause{}, + types.GrandpaResume{}, + types.GrandpaForcedChange{}, + types.GrandpaScheduledChange{}, + }, + expectedHandled: []scale.VaryingDataTypeValue{ + types.GrandpaOnDisabled{}, + types.GrandpaPause{}, + types.GrandpaResume{}, + types.GrandpaForcedChange{}, + }, + }, } - err = handler.handleConsensusDigest(d, header) - require.NoError(t, err) + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + digests := types.NewDigest() - next := handler.NextGrandpaAuthorityChange() - 
require.Equal(t, block, next) + for _, item := range tt.digestsTypes { + var digest = types.NewGrandpaConsensusDigest() + require.NoError(t, digest.Set(item)) - nextSetID := uint64(1) - auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) - require.NoError(t, err) - expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(sc.Auths) - require.NoError(t, err) - require.Equal(t, expected, auths) -} - -func TestNextGrandpaAuthorityChange_MultipleChanges(t *testing.T) { - handler, _ := newTestHandler(t) - handler.Start() - defer handler.Stop() + data, err := scale.Marshal(digest) + require.NoError(t, err) - kr, err := keystore.NewEd25519Keyring() - require.NoError(t, err) + consensusDigest := types.ConsensusDigest{ + ConsensusEngineID: types.GrandpaEngineID, + Data: data, + } - later := uint32(6) - sc := types.GrandpaScheduledChange{ - Auths: []types.GrandpaAuthoritiesRaw{}, - Delay: later, - } + require.NoError(t, digests.Add(consensusDigest)) + } - var digest = types.NewGrandpaConsensusDigest() - err = digest.Set(sc) - require.NoError(t, err) + header := &types.Header{ + Digest: digests, + } - data, err := scale.Marshal(digest) - require.NoError(t, err) + handler, _ := newTestHandler(t) + ctrl := gomock.NewController(t) + grandpaState := NewMockGrandpaState(ctrl) - d := &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } + for _, item := range tt.expectedHandled { + var digest = types.NewGrandpaConsensusDigest() + require.NoError(t, digest.Set(item)) - header := &types.Header{ - Number: 1, - } + data, err := scale.Marshal(digest) + require.NoError(t, err) - err = handler.handleConsensusDigest(d, header) - require.NoError(t, err) + expected := types.NewGrandpaConsensusDigest() + require.NoError(t, scale.Unmarshal(data, &expected)) - nextSetID := uint64(1) - auths, err := handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) - require.NoError(t, err) - expected, err := types.NewGrandpaVotersFromAuthoritiesRaw(sc.Auths) - require.NoError(t, err) - require.Equal(t, expected, auths) + grandpaState.EXPECT().HandleGRANDPADigest(header, expected).Return(nil) + } - const earlier uint = 4 - fc := types.GrandpaForcedChange{ - Auths: []types.GrandpaAuthoritiesRaw{ - {Key: kr.Alice().Public().(*ed25519.PublicKey).AsBytes(), ID: 0}, - }, - Delay: uint32(earlier), + handler.grandpaState = grandpaState + handler.HandleDigests(header) + }) } - - digest = types.NewGrandpaConsensusDigest() - err = digest.Set(fc) - require.NoError(t, err) - - data, err = scale.Marshal(digest) - require.NoError(t, err) - - d = &types.ConsensusDigest{ - ConsensusEngineID: types.GrandpaEngineID, - Data: data, - } - - err = handler.handleConsensusDigest(d, header) - require.NoError(t, err) - - next := handler.NextGrandpaAuthorityChange() - require.Equal(t, earlier+1, next) - - auths, err = handler.grandpaState.(*state.GrandpaState).GetAuthorities(nextSetID) - require.NoError(t, err) - expected, err = types.NewGrandpaVotersFromAuthoritiesRaw(fc.Auths) - require.NoError(t, err) - require.Equal(t, expected, auths) } func TestHandler_HandleBABEOnDisabled(t *testing.T) { @@ -515,3 +437,71 @@ func TestHandler_HandleNextConfigData(t *testing.T) { require.NoError(t, err) require.Equal(t, act.ToConfigData(), stored) } + +func issueBlocksWithGRANDPAScheduledChanges(t *testing.T, kp *sr25519.Keypair, dh *Handler, + stateSvc *state.Service, parentHeader *types.Header, + sc types.GrandpaScheduledChange, atBlock int, size int) (headers []*types.Header) { + t.Helper() 
+ + transcript := merlin.NewTranscript("BABE") + crypto.AppendUint64(transcript, []byte("slot number"), 1) + crypto.AppendUint64(transcript, []byte("current epoch"), 1) + transcript.AppendMessage([]byte("chain randomness"), []byte{}) + + output, proof, err := kp.VrfSign(transcript) + require.NoError(t, err) + + babePrimaryPreDigest := types.BabePrimaryPreDigest{ + SlotNumber: 1, + VRFOutput: output, + VRFProof: proof, + } + + preRuntimeDigest, err := babePrimaryPreDigest.ToPreRuntimeDigest() + require.NoError(t, err) + + digest := types.NewDigest() + + // include the consensus in the block being produced + if parentHeader.Number+1 == uint(atBlock) { + grandpaConsensusDigest := types.NewGrandpaConsensusDigest() + err = grandpaConsensusDigest.Set(sc) + require.NoError(t, err) + + grandpaDigest, err := scale.Marshal(grandpaConsensusDigest) + require.NoError(t, err) + + consensusDigest := types.ConsensusDigest{ + ConsensusEngineID: types.GrandpaEngineID, + Data: grandpaDigest, + } + require.NoError(t, digest.Add(*preRuntimeDigest, consensusDigest)) + } else { + require.NoError(t, digest.Add(*preRuntimeDigest)) + } + + header := &types.Header{ + ParentHash: parentHeader.Hash(), + Number: parentHeader.Number + 1, + Digest: digest, + } + + block := &types.Block{ + Header: *header, + Body: *types.NewBody([]types.Extrinsic{}), + } + + err = stateSvc.Block.AddBlock(block) + require.NoError(t, err) + + dh.HandleDigests(header) + + headers = append(headers, header) + + if size > 0 { + nestedHeaders := issueBlocksWithGRANDPAScheduledChanges(t, kp, dh, stateSvc, header, sc, atBlock, size-1) + headers = append(headers, nestedHeaders...) + } + + return headers +} diff --git a/dot/digest/interface.go b/dot/digest/interface.go index 268ae36c7b..2913697d0f 100644 --- a/dot/digest/interface.go +++ b/dot/digest/interface.go @@ -7,6 +7,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/grandpa" + "github.com/ChainSafe/gossamer/pkg/scale" ) // BlockState interface for block state methods @@ -39,4 +40,8 @@ type GrandpaState interface { SetNextPause(number uint) error SetNextResume(number uint) error GetCurrentSetID() (uint64, error) + + HandleGRANDPADigest(header *types.Header, digest scale.VaryingDataType) error + ApplyScheduledChanges(finalizedHeader *types.Header) error + ApplyForcedChanges(importedHeader *types.Header) error } diff --git a/dot/digest/mock_grandpa_test.go b/dot/digest/mock_grandpa_test.go new file mode 100644 index 0000000000..e71def0777 --- /dev/null +++ b/dot/digest/mock_grandpa_test.go @@ -0,0 +1,150 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/digest (interfaces: GrandpaState) + +// Package digest is a generated GoMock package. +package digest + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + scale "github.com/ChainSafe/gossamer/pkg/scale" + gomock "github.com/golang/mock/gomock" +) + +// MockGrandpaState is a mock of GrandpaState interface. +type MockGrandpaState struct { + ctrl *gomock.Controller + recorder *MockGrandpaStateMockRecorder +} + +// MockGrandpaStateMockRecorder is the mock recorder for MockGrandpaState. +type MockGrandpaStateMockRecorder struct { + mock *MockGrandpaState +} + +// NewMockGrandpaState creates a new mock instance. 
+func NewMockGrandpaState(ctrl *gomock.Controller) *MockGrandpaState { + mock := &MockGrandpaState{ctrl: ctrl} + mock.recorder = &MockGrandpaStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGrandpaState) EXPECT() *MockGrandpaStateMockRecorder { + return m.recorder +} + +// ApplyForcedChanges mocks base method. +func (m *MockGrandpaState) ApplyForcedChanges(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyForcedChanges", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyForcedChanges indicates an expected call of ApplyForcedChanges. +func (mr *MockGrandpaStateMockRecorder) ApplyForcedChanges(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyForcedChanges", reflect.TypeOf((*MockGrandpaState)(nil).ApplyForcedChanges), arg0) +} + +// ApplyScheduledChanges mocks base method. +func (m *MockGrandpaState) ApplyScheduledChanges(arg0 *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyScheduledChanges", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyScheduledChanges indicates an expected call of ApplyScheduledChanges. +func (mr *MockGrandpaStateMockRecorder) ApplyScheduledChanges(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyScheduledChanges", reflect.TypeOf((*MockGrandpaState)(nil).ApplyScheduledChanges), arg0) +} + +// GetCurrentSetID mocks base method. +func (m *MockGrandpaState) GetCurrentSetID() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSetID") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentSetID indicates an expected call of GetCurrentSetID. +func (mr *MockGrandpaStateMockRecorder) GetCurrentSetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSetID", reflect.TypeOf((*MockGrandpaState)(nil).GetCurrentSetID)) +} + +// HandleGRANDPADigest mocks base method. +func (m *MockGrandpaState) HandleGRANDPADigest(arg0 *types.Header, arg1 scale.VaryingDataType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleGRANDPADigest", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// HandleGRANDPADigest indicates an expected call of HandleGRANDPADigest. +func (mr *MockGrandpaStateMockRecorder) HandleGRANDPADigest(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleGRANDPADigest", reflect.TypeOf((*MockGrandpaState)(nil).HandleGRANDPADigest), arg0, arg1) +} + +// IncrementSetID mocks base method. +func (m *MockGrandpaState) IncrementSetID() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrementSetID") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IncrementSetID indicates an expected call of IncrementSetID. +func (mr *MockGrandpaStateMockRecorder) IncrementSetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementSetID", reflect.TypeOf((*MockGrandpaState)(nil).IncrementSetID)) +} + +// SetNextChange mocks base method. 
+func (m *MockGrandpaState) SetNextChange(arg0 []types.GrandpaVoter, arg1 uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNextChange", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetNextChange indicates an expected call of SetNextChange. +func (mr *MockGrandpaStateMockRecorder) SetNextChange(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextChange", reflect.TypeOf((*MockGrandpaState)(nil).SetNextChange), arg0, arg1) +} + +// SetNextPause mocks base method. +func (m *MockGrandpaState) SetNextPause(arg0 uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNextPause", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetNextPause indicates an expected call of SetNextPause. +func (mr *MockGrandpaStateMockRecorder) SetNextPause(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextPause", reflect.TypeOf((*MockGrandpaState)(nil).SetNextPause), arg0) +} + +// SetNextResume mocks base method. +func (m *MockGrandpaState) SetNextResume(arg0 uint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetNextResume", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetNextResume indicates an expected call of SetNextResume. +func (mr *MockGrandpaStateMockRecorder) SetNextResume(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextResume", reflect.TypeOf((*MockGrandpaState)(nil).SetNextResume), arg0) +} diff --git a/dot/mock_node_builder_test.go b/dot/mock_node_builder_test.go index 569d20cf31..7c08c9f0dc 100644 --- a/dot/mock_node_builder_test.go +++ b/dot/mock_node_builder_test.go @@ -108,18 +108,18 @@ func (mr *MocknodeBuilderIfaceMockRecorder) createDigestHandler(lvl, st interfac } // createGRANDPAService mocks base method. -func (m *MocknodeBuilderIface) createGRANDPAService(cfg *Config, st *state.Service, dh *digest.Handler, ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { +func (m *MocknodeBuilderIface) createGRANDPAService(cfg *Config, st *state.Service, ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "createGRANDPAService", cfg, st, dh, ks, net, telemetryMailer) + ret := m.ctrl.Call(m, "createGRANDPAService", cfg, st, ks, net, telemetryMailer) ret0, _ := ret[0].(*grandpa.Service) ret1, _ := ret[1].(error) return ret0, ret1 } // createGRANDPAService indicates an expected call of createGRANDPAService. -func (mr *MocknodeBuilderIfaceMockRecorder) createGRANDPAService(cfg, st, dh, ks, net, telemetryMailer interface{}) *gomock.Call { +func (mr *MocknodeBuilderIfaceMockRecorder) createGRANDPAService(cfg, st, ks, net, telemetryMailer interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createGRANDPAService", reflect.TypeOf((*MocknodeBuilderIface)(nil).createGRANDPAService), cfg, st, dh, ks, net, telemetryMailer) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "createGRANDPAService", reflect.TypeOf((*MocknodeBuilderIface)(nil).createGRANDPAService), cfg, st, ks, net, telemetryMailer) } // createNetworkService mocks base method. 
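For orientation, the generated MockGrandpaState above is driven through gomock's expect/record pattern. A minimal test-shaped sketch of that usage (hypothetical test, assuming it lives in package digest next to the generated file; the header and digest values are placeholders, not real consensus data):

func TestMockGrandpaStateUsage(t *testing.T) {
	ctrl := gomock.NewController(t)
	grandpaState := NewMockGrandpaState(ctrl)

	header := types.NewEmptyHeader()
	digest := types.NewGrandpaConsensusDigest()

	// record the expected call and its canned return value
	grandpaState.EXPECT().HandleGRANDPADigest(header, digest).Return(nil)

	// the code under test would normally make this call; here it is
	// invoked directly so the recorded expectation is satisfied
	err := grandpaState.HandleGRANDPADigest(header, digest)
	require.NoError(t, err)
}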
diff --git a/dot/node.go b/dot/node.go index 0a5a0f9264..86c6aa7de5 100644 --- a/dot/node.go +++ b/dot/node.go @@ -62,7 +62,7 @@ type nodeBuilderIface interface { createDigestHandler(lvl log.Level, st *state.Service) (*digest.Handler, error) createCoreService(cfg *Config, ks *keystore.GlobalKeystore, st *state.Service, net *network.Service, dh *digest.Handler) (*core.Service, error) - createGRANDPAService(cfg *Config, st *state.Service, dh *digest.Handler, ks keystore.Keystore, + createGRANDPAService(cfg *Config, st *state.Service, ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) newSyncService(cfg *Config, st *state.Service, fg dotsync.FinalityGadget, verifier *babe.VerificationManager, cs *core.Service, net *network.Service, telemetryMailer telemetry.Client) (*dotsync.Service, error) @@ -341,7 +341,7 @@ func newNode(cfg *Config, } nodeSrvcs = append(nodeSrvcs, coreSrvc) - fg, err := builder.createGRANDPAService(cfg, stateSrvc, dh, ks.Gran, networkSrvc, telemetryMailer) + fg, err := builder.createGRANDPAService(cfg, stateSrvc, ks.Gran, networkSrvc, telemetryMailer) if err != nil { return nil, err } diff --git a/dot/node_test.go b/dot/node_test.go index 1a19e40ee8..6f56557797 100644 --- a/dot/node_test.go +++ b/dot/node_test.go @@ -197,7 +197,7 @@ func TestNewNode(t *testing.T) { gomock.AssignableToTypeOf(&network.Service{}), &digest.Handler{}). Return(&core.Service{}, nil) m.EXPECT().createGRANDPAService(dotConfig, gomock.AssignableToTypeOf(&state.Service{}), - &digest.Handler{}, ks.Gran, gomock.AssignableToTypeOf(&network.Service{}), + ks.Gran, gomock.AssignableToTypeOf(&network.Service{}), gomock.AssignableToTypeOf(&telemetry.Mailer{})). Return(&grandpa.Service{}, nil) m.EXPECT().newSyncService(dotConfig, gomock.AssignableToTypeOf(&state.Service{}), &grandpa.Service{}, diff --git a/dot/rpc/modules/system.go b/dot/rpc/modules/system.go index 79c7e6e6af..89b33992a0 100644 --- a/dot/rpc/modules/system.go +++ b/dot/rpc/modules/system.go @@ -14,7 +14,7 @@ import ( "github.com/ChainSafe/gossamer/lib/crypto" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/btcsuite/btcutil/base58" - ctypes "github.com/centrifuge/go-substrate-rpc-client/v3/types" + ctypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) // SystemModule is an RPC module providing access to core API points diff --git a/dot/services.go b/dot/services.go index 191e80e1e6..5ee1e7e939 100644 --- a/dot/services.go +++ b/dot/services.go @@ -392,8 +392,8 @@ func (nodeBuilder) createSystemService(cfg *types.SystemInfo, stateSrvc *state.S } // createGRANDPAService creates a new GRANDPA service -func (nodeBuilder) createGRANDPAService(cfg *Config, st *state.Service, dh *digest.Handler, - ks keystore.Keystore, net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { +func (nodeBuilder) createGRANDPAService(cfg *Config, st *state.Service, ks keystore.Keystore, + net *network.Service, telemetryMailer telemetry.Client) (*grandpa.Service, error) { rt, err := st.Block.GetRuntime(nil) if err != nil { return nil, err @@ -416,15 +416,14 @@ func (nodeBuilder) createGRANDPAService(cfg *Config, st *state.Service, dh *dige } gsCfg := &grandpa.Config{ - LogLvl: cfg.Log.FinalityGadgetLvl, - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: dh, - Voters: voters, - Authority: cfg.Core.GrandpaAuthority, - Network: net, - Interval: cfg.Core.GrandpaInterval, - Telemetry: telemetryMailer, + LogLvl: cfg.Log.FinalityGadgetLvl, + BlockState: st.Block, 
+ GrandpaState: st.Grandpa, + Voters: voters, + Authority: cfg.Core.GrandpaAuthority, + Network: net, + Interval: cfg.Core.GrandpaInterval, + Telemetry: telemetryMailer, } if cfg.Core.GrandpaAuthority { diff --git a/dot/services_integration_test.go b/dot/services_integration_test.go index 157f493d0d..1a7252eda6 100644 --- a/dot/services_integration_test.go +++ b/dot/services_integration_test.go @@ -298,9 +298,6 @@ func TestCreateGrandpaService(t *testing.T) { err = builder.loadRuntime(cfg, ns, stateSrvc, ks, &network.Service{}) require.NoError(t, err) - dh, err := builder.createDigestHandler(cfg.Log.DigestLvl, stateSrvc) - require.NoError(t, err) - networkConfig := &network.Config{ BasePath: t.TempDir(), NoBootstrap: true, @@ -311,7 +308,7 @@ func TestCreateGrandpaService(t *testing.T) { testNetworkService, err := network.NewService(networkConfig) require.NoError(t, err) - gs, err := builder.createGRANDPAService(cfg, stateSrvc, dh, ks.Gran, testNetworkService, nil) + gs, err := builder.createGRANDPAService(cfg, stateSrvc, ks.Gran, testNetworkService, nil) require.NoError(t, err) require.NotNil(t, gs) } diff --git a/dot/services_test.go b/dot/services_test.go index e54e2ff053..a1e9feee38 100644 --- a/dot/services_test.go +++ b/dot/services_test.go @@ -449,7 +449,7 @@ func Test_nodeBuilder_createGRANDPAService(t *testing.T) { networkSrvc, err := network.NewService(networkConfig) require.NoError(t, err) builder := nodeBuilder{} - got, err := builder.createGRANDPAService(cfg, stateSrvc, nil, tt.ks, networkSrvc, + got, err := builder.createGRANDPAService(cfg, stateSrvc, tt.ks, networkSrvc, nil) assert.ErrorIs(t, err, tt.err) // TODO: create interface for grandpa.NewService to enable testing with assert.Equal diff --git a/dot/state/grandpa.go b/dot/state/grandpa.go index 32850e9dec..50fbce2536 100644 --- a/dot/state/grandpa.go +++ b/dot/state/grandpa.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "errors" "fmt" + "sync" "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" @@ -14,6 +15,15 @@ import ( "github.com/ChainSafe/gossamer/pkg/scale" ) +var ( + errPendingScheduledChanges = errors.New("pending scheduled changes needs to be applied") + errDuplicateHashes = errors.New("duplicated hashes") + errAlreadyHasForcedChange = errors.New("already has a forced change") + errUnfinalizedAncestor = errors.New("unfinalized ancestor") + + ErrNoNextAuthorityChange = errors.New("no next authority change") +) + var ( genesisSetID = uint64(0) grandpaPrefix = "grandpa" @@ -26,40 +36,276 @@ var ( // GrandpaState tracks information related to grandpa type GrandpaState struct { - db chaindb.Database + db chaindb.Database + blockState *BlockState + + forksLock sync.RWMutex + + forcedChanges *orderedPendingChanges + scheduledChangeRoots *changeTree } // NewGrandpaStateFromGenesis returns a new GrandpaState given the grandpa genesis authorities -func NewGrandpaStateFromGenesis(db chaindb.Database, genesisAuthorities []types.GrandpaVoter) (*GrandpaState, error) { +func NewGrandpaStateFromGenesis(db chaindb.Database, bs *BlockState, + genesisAuthorities []types.GrandpaVoter) (*GrandpaState, error) { grandpaDB := chaindb.NewTable(db, grandpaPrefix) s := &GrandpaState{ - db: grandpaDB, + db: grandpaDB, + blockState: bs, + scheduledChangeRoots: new(changeTree), + forcedChanges: new(orderedPendingChanges), } if err := s.setCurrentSetID(genesisSetID); err != nil { - return nil, err + return nil, fmt.Errorf("cannot set current set id: %w", err) } if err := s.SetLatestRound(0); err != nil { - return nil, 
err
+		return nil, fmt.Errorf("cannot set latest round: %w", err)
 	}

 	if err := s.setAuthorities(genesisSetID, genesisAuthorities); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("cannot set authorities: %w", err)
 	}

-	if err := s.setSetIDChangeAtBlock(genesisSetID, 0); err != nil {
-		return nil, err
+	if err := s.setChangeSetIDAtBlock(genesisSetID, 0); err != nil {
+		return nil, fmt.Errorf("cannot set change set id at block 0: %w", err)
 	}

 	return s, nil
 }

 // NewGrandpaState returns a new GrandpaState
-func NewGrandpaState(db chaindb.Database) (*GrandpaState, error) {
+func NewGrandpaState(db chaindb.Database, bs *BlockState) *GrandpaState {
 	return &GrandpaState{
-		db: chaindb.NewTable(db, grandpaPrefix),
-	}, nil
+		db:                   chaindb.NewTable(db, grandpaPrefix),
+		blockState:           bs,
+		scheduledChangeRoots: new(changeTree),
+		forcedChanges:        new(orderedPendingChanges),
+	}
+}
+
+// HandleGRANDPADigest receives a decoded GRANDPA digest and calls the right function to handle the digest
+func (s *GrandpaState) HandleGRANDPADigest(header *types.Header, digest scale.VaryingDataType) error {
+	switch val := digest.Value().(type) {
+	case types.GrandpaScheduledChange:
+		return s.addScheduledChange(header, val)
+	case types.GrandpaForcedChange:
+		return s.addForcedChange(header, val)
+	case types.GrandpaOnDisabled:
+		return nil
+	case types.GrandpaPause:
+		logger.Warn("GRANDPA Pause consensus message not implemented yet")
+		return nil
+	case types.GrandpaResume:
+		logger.Warn("GRANDPA Resume consensus message not implemented yet")
+		return nil
+	default:
+		return fmt.Errorf("unsupported GRANDPA consensus digest: %T", val)
+	}
+}
+
+func (s *GrandpaState) addForcedChange(header *types.Header, fc types.GrandpaForcedChange) error {
+	auths, err := types.GrandpaAuthoritiesRawToAuthorities(fc.Auths)
+	if err != nil {
+		return fmt.Errorf("cannot parse raw GRANDPA authorities: %w", err)
+	}
+
+	pendingChange := pendingChange{
+		bestFinalizedNumber: fc.BestFinalizedBlock,
+		nextAuthorities:     auths,
+		announcingHeader:    header,
+		delay:               fc.Delay,
+	}
+
+	err = s.forcedChanges.importChange(pendingChange, s.blockState.IsDescendantOf)
+	if err != nil {
+		return fmt.Errorf("cannot import forced change: %w", err)
+	}
+
+	logger.Debugf("there are now %d possible forced changes", s.forcedChanges.Len())
+	return nil
+}
+
+func (s *GrandpaState) addScheduledChange(header *types.Header, sc types.GrandpaScheduledChange) error {
+	auths, err := types.GrandpaAuthoritiesRawToAuthorities(sc.Auths)
+	if err != nil {
+		return fmt.Errorf("cannot parse raw GRANDPA authorities: %w", err)
+	}
+
+	pendingChange := &pendingChange{
+		nextAuthorities:  auths,
+		announcingHeader: header,
+		delay:            sc.Delay,
+	}
+
+	err = s.scheduledChangeRoots.importChange(pendingChange, s.blockState.IsDescendantOf)
+	if err != nil {
+		return fmt.Errorf("cannot import scheduled change: %w", err)
+	}
+
+	logger.Debugf("there are now %d possible scheduled change roots", s.scheduledChangeRoots.Len())
+	return nil
+}
+
+// ApplyScheduledChanges searches the scheduled changes for a root at or behind
+// the finalized number and applies its authority set change
+func (s *GrandpaState) ApplyScheduledChanges(finalizedHeader *types.Header) error {
+	finalizedHash := finalizedHeader.Hash()
+
+	err := s.forcedChanges.pruneChanges(finalizedHash, s.blockState.IsDescendantOf)
+	if err != nil {
+		return fmt.Errorf("cannot prune non-descendant forced changes: %w", err)
+	}
+
+	if s.scheduledChangeRoots.Len() == 0 {
+		return nil
+	}
+
+	changeToApply, err := s.scheduledChangeRoots.findApplicable(finalizedHash,
+		finalizedHeader.Number, s.blockState.IsDescendantOf)
+	if err != nil {
+		return fmt.Errorf("cannot get applicable scheduled change: %w", err)
+	}
+
+	if changeToApply == nil {
+		return nil
+	}
+
+	logger.Debugf("applying scheduled change: %s", changeToApply.change)
+
+	newSetID, err := s.IncrementSetID()
+	if err != nil {
+		return fmt.Errorf("cannot increment set id: %w", err)
+	}
+
+	grandpaVotersAuthorities := types.NewGrandpaVotersFromAuthorities(changeToApply.change.nextAuthorities)
+	err = s.setAuthorities(newSetID, grandpaVotersAuthorities)
+	if err != nil {
+		return fmt.Errorf("cannot set authorities: %w", err)
+	}
+
+	err = s.setChangeSetIDAtBlock(newSetID, changeToApply.change.effectiveNumber())
+	if err != nil {
+		return fmt.Errorf("cannot set the change set id at block: %w", err)
+	}
+
+	logger.Debugf("applying authority set change scheduled at block #%d",
+		changeToApply.change.announcingHeader.Number)
+
+	// TODO: add afg.applying_scheduled_authority_set_change telemetry info here
+	return nil
+}
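Reading the two halves together: a scheduled change is only tracked when its digest is handled, and applied once finalisation reaches its effective number. A condensed sketch of that flow (hypothetical wiring, not part of the patch; db, blockState, importedHeader, scheduledChangeDigest and finalizedHeader are assumed to exist in the caller):

gs := NewGrandpaState(db, blockState)

// on import: the GrandpaScheduledChange digest is only tracked,
// rooted at the fork of the header that announced it
if err := gs.HandleGRANDPADigest(importedHeader, scheduledChangeDigest); err != nil {
	return err
}

// on finalisation: once the finalised chain reaches
// announcingHeader.Number + delay, the set ID is incremented
// and the new authority set is persisted
if err := gs.ApplyScheduledChanges(finalizedHeader); err != nil {
	return err
}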
+// ApplyForcedChanges checks whether there is a pending forced change relative to
+// the imported block and applies it; otherwise nothing happens
+func (s *GrandpaState) ApplyForcedChanges(importedBlockHeader *types.Header) error {
+	forcedChange, err := s.forcedChanges.findApplicable(importedBlockHeader.Hash(),
+		importedBlockHeader.Number, s.blockState.IsDescendantOf)
+	if err != nil {
+		return fmt.Errorf("cannot find applicable forced change: %w", err)
+	} else if forcedChange == nil {
+		return nil
+	}
+
+	forcedChangeHash := forcedChange.announcingHeader.Hash()
+	bestFinalizedNumber := forcedChange.bestFinalizedNumber
+
+	dependant, err := s.scheduledChangeRoots.lookupChangeWhere(func(pcn *pendingChangeNode) (bool, error) {
+		if pcn.change.effectiveNumber() > uint(bestFinalizedNumber) {
+			return false, nil
+		}
+
+		scheduledBlockHash := pcn.change.announcingHeader.Hash()
+		return s.blockState.IsDescendantOf(scheduledBlockHash, forcedChangeHash)
+	})
+	if err != nil {
+		return fmt.Errorf("cannot check pending changes while applying forced change: %w", err)
+	} else if dependant != nil {
+		return fmt.Errorf("%w: %s", errPendingScheduledChanges, dependant.change)
+	}
+
+	logger.Debugf("applying forced change: %s", forcedChange)
+
+	// TODO: send the telemetry messages here
+	// afg.applying_forced_authority_set_change
+
+	currentSetID, err := s.GetCurrentSetID()
+	if err != nil {
+		return fmt.Errorf("cannot get current set id: %w", err)
+	}
+
+	err = s.setChangeSetIDAtBlock(currentSetID, uint(forcedChange.bestFinalizedNumber))
+	if err != nil {
+		return fmt.Errorf("cannot set change set id at block: %w", err)
+	}
+
+	newSetID, err := s.IncrementSetID()
+	if err != nil {
+		return fmt.Errorf("cannot increment set id: %w", err)
+	}
+
+	grandpaVotersAuthorities := types.NewGrandpaVotersFromAuthorities(forcedChange.nextAuthorities)
+	err = s.setAuthorities(newSetID, grandpaVotersAuthorities)
+	if err != nil {
+		return fmt.Errorf("cannot set authorities: %w", err)
+	}
+
+	err = s.setChangeSetIDAtBlock(newSetID, forcedChange.effectiveNumber())
+	if err != nil {
+		return fmt.Errorf("cannot set change set id at block: %w", err)
+	}
+
+	logger.Debugf("applying authority set forced change at block #%d",
+		forcedChange.announcingHeader.Number)
+
+	return nil
+}
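Forced changes use the opposite trigger: they are applied at block import rather than at finalisation, and they refuse to apply while an earlier scheduled change on the same chain is still pending (the errPendingScheduledChanges path above). A sketch of the import-side call, mirroring how the digest handler's handleBlockImport drives it earlier in this patch (gs, block and logger are assumed names in the caller):

// on every imported block, after its digests have been handled:
if err := gs.ApplyForcedChanges(&block.Header); err != nil {
	logger.Errorf("failed to apply forced changes: %s", err)
}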
+
+// NextGrandpaAuthorityChange returns the block number of the next upcoming grandpa authorities change.
+// It returns ErrNoNextAuthorityChange if no change is scheduled.
+func (s *GrandpaState) NextGrandpaAuthorityChange(bestBlockHash common.Hash, bestBlockNumber uint) (
+	blockNumber uint, err error) {
+	forcedChange, err := s.forcedChanges.lookupChangeWhere(func(pc pendingChange) (bool, error) {
+		isDescendant, err := s.blockState.IsDescendantOf(pc.announcingHeader.Hash(), bestBlockHash)
+		if err != nil {
+			return false, fmt.Errorf("cannot check ancestry: %w", err)
+		}
+
+		return isDescendant && pc.effectiveNumber() <= bestBlockNumber, nil
+	})
+	if err != nil {
+		return 0, fmt.Errorf("cannot get forced change on chain of %s: %w",
+			bestBlockHash, err)
+	}
+
+	scheduledChangeNode, err := s.scheduledChangeRoots.lookupChangeWhere(func(pcn *pendingChangeNode) (bool, error) {
+		isDescendant, err := s.blockState.IsDescendantOf(pcn.change.announcingHeader.Hash(), bestBlockHash)
+		if err != nil {
+			return false, fmt.Errorf("cannot check ancestry: %w", err)
+		}
+
+		return isDescendant && pcn.change.effectiveNumber() <= bestBlockNumber, nil
+	})
+	if err != nil {
+		return 0, fmt.Errorf("cannot get scheduled change on chain of %s: %w",
+			bestBlockHash, err)
+	}
+
+	var next uint
+	if scheduledChangeNode != nil {
+		next = scheduledChangeNode.change.effectiveNumber()
+	}
+
+	if forcedChange != nil && (forcedChange.effectiveNumber() < next || next == 0) {
+		next = forcedChange.effectiveNumber()
+	}
+
+	if next == 0 {
+		return 0, ErrNoNextAuthorityChange
+	}
+
+	return next, nil
 }
 
 func authoritiesKey(setID uint64) []byte {
@@ -152,7 +398,7 @@ func (s *GrandpaState) SetNextChange(authorities []types.GrandpaVoter, number ui
 		return err
 	}
 
-	err = s.setSetIDChangeAtBlock(nextSetID, number)
+	err = s.setChangeSetIDAtBlock(nextSetID, number)
 	if err != nil {
 		return err
 	}
@@ -177,7 +423,7 @@ func (s *GrandpaState) IncrementSetID() (newSetID uint64, err error) {
 }
 
-// setSetIDChangeAtBlock sets a set ID change at a certain block
-func (s *GrandpaState) setSetIDChangeAtBlock(setID uint64, number uint) error {
+// setChangeSetIDAtBlock sets a set ID change at a certain block
+func (s *GrandpaState) setChangeSetIDAtBlock(setID uint64, number uint) error {
 	return s.db.Put(setIDChangeKey(setID), common.UintToBytes(number))
 }
 
@@ -206,8 +452,7 @@ func (s *GrandpaState) GetSetIDByBlockNumber(blockNumber uint) (uint64, error) {
 		}
 		curr = curr - 1
 		continue
-	}
-	if err != nil {
+	} else if err != nil {
 		return 0, err
 	}
 
diff --git a/dot/state/grandpa_changes.go b/dot/state/grandpa_changes.go
new file mode 100644
index 0000000000..f28ef37339
--- /dev/null
+++ b/dot/state/grandpa_changes.go
@@ -0,0 +1,316 @@
+// Copyright 2022 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package state
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/lib/common"
+)
+
+type conditionFunc[T any] func(T) (bool, error)
+type isDescendantOfFunc func(parent, child common.Hash) (bool, error)
+
+type pendingChange struct {
+	bestFinalizedNumber uint32
+	delay               uint32
+	nextAuthorities     []types.Authority
+	announcingHeader    *types.Header
+}
+
+func (p pendingChange) String() string {
+	return fmt.Sprintf("announcing header: %s (%d), delay: %d, next authorities: %d",
+		p.announcingHeader.Hash(), p.announcingHeader.Number, p.delay, len(p.nextAuthorities))
+}
+
+func (p *pendingChange) effectiveNumber() uint {
+	return p.announcingHeader.Number + uint(p.delay)
+}
+
+type orderedPendingChanges []pendingChange
+
+func (oc *orderedPendingChanges) Len() int { return len(*oc) }
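A change's effective number is its announcing block number plus its delay, and the slice above is kept ordered by that value. A toy in-package example (hypothetical numbers) showing why a later-announced change can still become applicable first:

a := pendingChange{announcingHeader: &types.Header{Number: 90}, delay: 20} // effective at 110
b := pendingChange{announcingHeader: &types.Header{Number: 100}, delay: 5} // effective at 105

fmt.Println(a.effectiveNumber(), b.effectiveNumber()) // 110 105
// b becomes applicable first even though it was announced later,
// so it sorts before a in orderedPendingChanges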
+
+// findApplicable tries to retrieve an applicable change from the slice of forced changes
+func (oc orderedPendingChanges) findApplicable(importedHash common.Hash, importedNumber uint,
+	isDescendantOf isDescendantOfFunc) (*pendingChange, error) {
+
+	return oc.lookupChangeWhere(func(forced pendingChange) (bool, error) {
+		announcingHash := forced.announcingHeader.Hash()
+		effectiveNumber := forced.effectiveNumber()
+
+		if importedHash.Equal(announcingHash) && effectiveNumber == importedNumber {
+			return true, nil
+		}
+
+		isDescendant, err := isDescendantOf(announcingHash, importedHash)
+		if err != nil {
+			return false, fmt.Errorf("cannot check ancestry: %w", err)
+		}
+
+		return isDescendant && effectiveNumber == importedNumber, nil
+	})
+}
+
+// lookupChangeWhere returns the first pending change that satisfies the condition
+func (oc orderedPendingChanges) lookupChangeWhere(condition conditionFunc[pendingChange]) (
+	pendingChange *pendingChange, err error) {
+	for _, change := range oc {
+		ok, err := condition(change)
+		if err != nil {
+			return pendingChange, fmt.Errorf("failed while applying condition: %w", err)
+		}
+
+		if ok {
+			return &change, nil
+		}
+	}
+
+	return nil, nil //nolint:nilnil
+}
+
+// importChange tracks the pending change only if it is the unique forced
+// change in its fork, otherwise it returns an error
+func (oc *orderedPendingChanges) importChange(pendingChange pendingChange, isDescendantOf isDescendantOfFunc) error {
+	announcingHeader := pendingChange.announcingHeader.Hash()
+
+	for _, change := range *oc {
+		changeBlockHash := change.announcingHeader.Hash()
+
+		if changeBlockHash.Equal(announcingHeader) {
+			return fmt.Errorf("%w: %s", errDuplicateHashes, changeBlockHash)
+		}
+
+		isDescendant, err := isDescendantOf(changeBlockHash, announcingHeader)
+		if err != nil {
+			return fmt.Errorf("cannot verify ancestry: %w", err)
+		}
+
+		if isDescendant {
+			return fmt.Errorf("%w: for block hash %s", errAlreadyHasForcedChange, changeBlockHash)
+		}
+	}
+
+	orderedChanges := *oc
+
+	// Use a binary search to insert the pending change at the right position
+	// of a slice ordered by effective number and by announcing header number
+	idxToInsert := sort.Search(oc.Len(), func(i int) bool {
+		return orderedChanges[i].effectiveNumber() >= pendingChange.effectiveNumber() &&
+			orderedChanges[i].announcingHeader.Number >= pendingChange.announcingHeader.Number
+	})
+
+	orderedChanges = append(orderedChanges, pendingChange)
+	copy(orderedChanges[idxToInsert+1:], orderedChanges[idxToInsert:])
+	orderedChanges[idxToInsert] = pendingChange
+	*oc = orderedChanges
+
+	return nil
+}
+
+// pruneChanges removes changes that are not descendants of the hash argument;
+// this function updates the tracked forced changes in place
+func (oc *orderedPendingChanges) pruneChanges(hash common.Hash, isDescendantOf isDescendantOfFunc) error {
+	onBranchForcedChanges := make([]pendingChange, 0, oc.Len())
+
+	for _, forcedChange := range *oc {
+		isDescendant, err := isDescendantOf(hash, forcedChange.announcingHeader.Hash())
+		if err != nil {
+			return fmt.Errorf("cannot verify ancestry: %w", err)
+		}
+
+		if isDescendant {
+			onBranchForcedChanges = append(onBranchForcedChanges, forcedChange)
+		}
+	}
+
+	*oc = onBranchForcedChanges
+	return nil
+}
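The ordered insertion in importChange above leans on sort.Search, which returns the smallest index whose element satisfies the predicate. The same pattern as a self-contained program (standalone sketch, not part of the patch):

package main

import (
	"fmt"
	"sort"
)

func main() {
	ordered := []uint{100, 105, 110} // effective numbers, kept sorted
	newValue := uint(107)

	// smallest index whose element is >= newValue
	idx := sort.Search(len(ordered), func(i int) bool {
		return ordered[i] >= newValue
	})

	// grow by one, shift the tail right, place the new value
	ordered = append(ordered, 0)
	copy(ordered[idx+1:], ordered[idx:])
	ordered[idx] = newValue

	fmt.Println(ordered) // [100 105 107 110]
}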
+
+// importNode is called recursively until we find a node that can import the pending
+// change as one of its children. The node that imports the pending change must be an
+// ancestor of it, with a lower block number than the pending change's announcing block.
+func (c *pendingChangeNode) importNode(blockHash common.Hash, blockNumber uint, pendingChange *pendingChange,
+	isDescendantOf isDescendantOfFunc) (imported bool, err error) {
+	announcingHash := c.change.announcingHeader.Hash()
+
+	if blockHash.Equal(announcingHash) {
+		return false, fmt.Errorf("%w: %s", errDuplicateHashes, blockHash)
+	}
+
+	isDescendant, err := isDescendantOf(announcingHash, blockHash)
+	if err != nil {
+		return false, fmt.Errorf("cannot check ancestry: %w", err)
+	}
+
+	if !isDescendant {
+		return false, nil
+	}
+
+	if blockNumber <= c.change.announcingHeader.Number {
+		return false, nil
+	}
+
+	for _, child := range c.nodes {
+		imported, err := child.importNode(blockHash, blockNumber, pendingChange, isDescendantOf)
+		if err != nil {
+			return false, err
+		}
+
+		if imported {
+			return true, nil
+		}
+	}
+
+	childNode := &pendingChangeNode{change: pendingChange}
+	c.nodes = append(c.nodes, childNode)
+	return true, nil
+}
+
+// changeTree keeps track of the changes per fork, allowing n forks in the
+// same structure. It is intended to be a directed acyclic graph where the
+// change nodes are placed in descendancy order and by block number; a node's
+// ancestry can be verified using the `isDescendantOfFunc`
+type changeTree []*pendingChangeNode
+
+func (ct changeTree) Len() int { return len(ct) }
+
+func (ct *changeTree) importChange(pendingChange *pendingChange, isDescendantOf isDescendantOfFunc) error {
+	for _, root := range *ct {
+		imported, err := root.importNode(pendingChange.announcingHeader.Hash(),
+			pendingChange.announcingHeader.Number, pendingChange, isDescendantOf)
+
+		if err != nil {
+			return err
+		}
+
+		if imported {
+			logger.Debugf("changes on header %s (%d) imported successfully",
+				pendingChange.announcingHeader.Hash(), pendingChange.announcingHeader.Number)
+			return nil
+		}
+	}
+
+	pendingChangeNode := &pendingChangeNode{
+		change: pendingChange,
+	}
+
+	*ct = append(*ct, pendingChangeNode)
+	return nil
+}
+
+// lookupChangeWhere returns the first change that satisfies the
+// condition, without modifying the current state of the change tree
+func (ct changeTree) lookupChangeWhere(condition conditionFunc[*pendingChangeNode]) (
+	changeNode *pendingChangeNode, err error) {
+	for _, root := range ct {
+		ok, err := condition(root)
+		if err != nil {
+			return nil, fmt.Errorf("failed while applying condition: %w", err)
+		}
+
+		if ok {
+			return root, nil
+		}
+	}
+
+	return nil, nil //nolint:nilnil
+}
+
+// findApplicable tries to retrieve an applicable change from the tree.
+// If it finds a change node it updates the tree roots with that node's
+// children; otherwise it prunes the nodes that do not belong to the same
+// chain as the `hash` argument
+func (ct *changeTree) findApplicable(hash common.Hash, number uint,
+	isDescendantOf isDescendantOfFunc) (changeNode *pendingChangeNode, err error) {
+
+	changeNode, err = ct.findApplicableChange(hash, number, isDescendantOf)
+	if err != nil {
+		return nil, err
+	}
+
+	if changeNode == nil {
+		err := ct.pruneChanges(hash, isDescendantOf)
+		if err != nil {
+			return nil, fmt.Errorf("cannot prune changes: %w", err)
+		}
+	} else {
+		*ct = make([]*pendingChangeNode, len(changeNode.nodes))
+		copy(*ct, changeNode.nodes)
+	}
+
+	return changeNode, nil
+}
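The caller is expected to invoke findApplicable with the hash and number of each newly finalised header; both outcomes look roughly as follows, where finalizedHash, finalizedNumber and isDescendantOf are hypothetical stand-ins for the caller's values:

    node, err := tree.findApplicable(finalizedHash, finalizedNumber, isDescendantOf)
    if err != nil {
        // e.g. errUnfinalizedAncestor when an earlier change was skipped
        return err
    }
    if node != nil {
        // node.change is ready to be applied (new voter set, incremented
        // set ID) and the tree roots were replaced by node's children
    }
    // if node == nil, roots not on the finalised chain were pruned instead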
+
+// findApplicableChange iterates through the change tree roots
+// looking for the change node which:
+// 1. has the same hash as the one we're looking for, or is an ancestor of it.
+// 2. has an effective number lower than or equal to the number we're looking for.
+// 3. does not contain pending changes that still need to be applied.
+func (ct changeTree) findApplicableChange(hash common.Hash, number uint,
+	isDescendantOf isDescendantOfFunc) (changeNode *pendingChangeNode, err error) {
+	return ct.lookupChangeWhere(func(pcn *pendingChangeNode) (bool, error) {
+		if pcn.change.effectiveNumber() > number {
+			return false, nil
+		}
+
+		changeNodeHash := pcn.change.announcingHeader.Hash()
+		if !hash.Equal(changeNodeHash) {
+			isDescendant, err := isDescendantOf(changeNodeHash, hash)
+			if err != nil {
+				return false, fmt.Errorf("cannot verify ancestry: %w", err)
+			}
+
+			if !isDescendant {
+				return false, nil
+			}
+		}
+
+		// the changes must be applied in order, so we need to check whether our
+		// finalized header is ahead of any child; if it is, then some previous
+		// change was not applied
+		for _, child := range pcn.nodes {
+			isDescendant, err := isDescendantOf(child.change.announcingHeader.Hash(), hash)
+			if err != nil {
+				return false, fmt.Errorf("cannot verify ancestry: %w", err)
+			}
+
+			if child.change.announcingHeader.Number <= number && isDescendant {
+				return false, errUnfinalizedAncestor
+			}
+		}
+
+		return true, nil
+	})
+}
+
+// pruneChanges removes changes that are not descendants of the given hash;
+// it updates the current state of the change tree in place
+func (ct *changeTree) pruneChanges(hash common.Hash, isDescendantOf isDescendantOfFunc) error {
+	onBranchChanges := []*pendingChangeNode{}
+
+	for _, root := range *ct {
+		scheduledChangeHash := root.change.announcingHeader.Hash()
+
+		isDescendant, err := isDescendantOf(hash, scheduledChangeHash)
+		if err != nil {
+			return fmt.Errorf("cannot verify ancestry: %w", err)
+		}
+
+		if isDescendant {
+			onBranchChanges = append(onBranchChanges, root)
+		}
+	}
+
+	*ct = onBranchChanges
+	return nil
+}
diff --git a/dot/state/grandpa_test.go b/dot/state/grandpa_test.go
index 20bf45869a..7b58148b50 100644
--- a/dot/state/grandpa_test.go
+++ b/dot/state/grandpa_test.go
@@ -4,11 +4,19 @@
 package state
 
 import (
+	"fmt"
 	"testing"
 
+	"github.com/ChainSafe/chaindb"
 	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/lib/common"
+	"github.com/ChainSafe/gossamer/lib/crypto"
 	"github.com/ChainSafe/gossamer/lib/crypto/ed25519"
+	"github.com/ChainSafe/gossamer/lib/crypto/sr25519"
 	"github.com/ChainSafe/gossamer/lib/keystore"
+	"github.com/ChainSafe/gossamer/lib/trie"
+	"github.com/golang/mock/gomock"
+	"github.com/gtank/merlin"
 	"github.com/stretchr/testify/require"
 )
 
@@ -22,7 +30,7 @@ var (
 func TestNewGrandpaStateFromGenesis(t *testing.T) {
 	db := NewInMemoryDB(t)
 
-	gs, err := NewGrandpaStateFromGenesis(db, testAuths)
+	gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths)
 	require.NoError(t, err)
 
 	currSetID, err := gs.GetCurrentSetID()
@@ -40,7 +48,7 @@ func TestNewGrandpaStateFromGenesis(t *testing.T) {
 func TestGrandpaState_SetNextChange(t *testing.T) {
 	db := NewInMemoryDB(t)
 
-	gs, err := NewGrandpaStateFromGenesis(db, testAuths)
+	gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths)
 	require.NoError(t, err)
 
 	err = gs.SetNextChange(testAuths, 1)
@@ -57,7 +65,7 @@ func TestGrandpaState_SetNextChange(t *testing.T) {
 func TestGrandpaState_IncrementSetID(t *testing.T) {
 	db := NewInMemoryDB(t)
 
-	gs, err := NewGrandpaStateFromGenesis(db, testAuths)
+	gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths)
 	require.NoError(t, err)
 
 	setID, err := gs.IncrementSetID()
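The grandpa_test.go hunks above and below all make the same mechanical update: the GRANDPA state constructor now takes the block state, which the fork-tree logic needs for its IsDescendantOf ancestry queries. The declaration itself lives outside the hunks shown in this patch; the signature implied by these call sites would be roughly:

    // Implied by the call sites in this patch; the actual declaration is in
    // dot/state/grandpa.go, outside the hunks shown here.
    func NewGrandpaStateFromGenesis(db chaindb.Database, bs *BlockState,
        genesisAuthorities []types.GrandpaVoter) (*GrandpaState, error)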
@@ -67,7 +75,7 @@ func TestGrandpaState_IncrementSetID(t *testing.T) {
 
 func TestGrandpaState_GetSetIDByBlockNumber(t *testing.T) {
 	db := NewInMemoryDB(t)
 
-	gs, err := NewGrandpaStateFromGenesis(db, testAuths)
+	gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths)
 	require.NoError(t, err)
 
 	err = gs.SetNextChange(testAuths, 100)
@@ -100,7 +108,7 @@ func TestGrandpaState_GetSetIDByBlockNumber(t *testing.T) {
 
 func TestGrandpaState_LatestRound(t *testing.T) {
 	db := NewInMemoryDB(t)
 
-	gs, err := NewGrandpaStateFromGenesis(db, testAuths)
+	gs, err := NewGrandpaStateFromGenesis(db, nil, testAuths)
 	require.NoError(t, err)
 
 	r, err := gs.GetLatestRound()
@@ -114,3 +122,1237 @@ func TestGrandpaState_LatestRound(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, uint64(99), r)
 }
+
+func testBlockState(t *testing.T, db chaindb.Database) *BlockState {
+	ctrl := gomock.NewController(t)
+	telemetryMock := NewMockClient(ctrl)
+	telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes()
+	header := testGenesisHeader
+
+	bs, err := NewBlockStateFromGenesis(db, newTriesEmpty(), header, telemetryMock)
+	require.NoError(t, err)
+
+	// loads in-memory tries with genesis state root, should be deleted
+	// after another block is finalised
+	tr := trie.NewEmptyTrie()
+	err = tr.Load(bs.db, header.StateRoot)
+	require.NoError(t, err)
+	bs.tries.softSet(header.StateRoot, tr)
+
+	return bs
+}
+
+func TestAddScheduledChangesKeepTheRightForkTree(t *testing.T) {
+	t.Parallel()
+
+	keyring, err := keystore.NewSr25519Keyring()
+	require.NoError(t, err)
+
+	db := NewInMemoryDB(t)
+	blockState := testBlockState(t, db)
+
+	gs, err := NewGrandpaStateFromGenesis(db, blockState, nil)
+	require.NoError(t, err)
+
+	/*
+	 * create chainA and two forks: chainB and chainC
+	 *
+	 *      / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (B)
+	 * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A)
+	 *                    \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C)
+	 */
+	chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, gs.blockState, testGenesisHeader, 10)
+	chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, gs.blockState, chainA[1], 9)
+	chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, gs.blockState, chainA[5], 10)
+
+	scheduledChange := &types.GrandpaScheduledChange{
+		Delay: 0, // delay of 0 means the modifications should be applied immediately
+		Auths: []types.GrandpaAuthoritiesRaw{
+			{Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()},
+			{Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()},
+			{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+		},
+	}
+
+	// headersToAdd enables tracking errors while adding specific entries
+	// to the scheduled change fork tree, e.g.
+ // - adding duplicate hashes entries: while adding the first entry everything should be ok, + // however when adding the second duplicated entry we should expect the errDuplicateHashes error + type headersToAdd struct { + header *types.Header + wantErr error + } + + tests := map[string]struct { + headersWithScheduledChanges []headersToAdd + expectedRoots int + highestFinalizedHeader *types.Header + }{ + "add_scheduled_changes_only_with_roots": { + headersWithScheduledChanges: []headersToAdd{ + {header: chainA[6]}, + {header: chainB[3]}, + }, + expectedRoots: 2, + }, + "add_scheduled_changes_with_roots_and_children": { + headersWithScheduledChanges: []headersToAdd{ + {header: chainA[6]}, {header: chainA[8]}, + {header: chainB[3]}, {header: chainB[7]}, {header: chainB[9]}, + {header: chainC[8]}, + }, + expectedRoots: 3, + }, + "add_scheduled_changes_with_same_hash": { + headersWithScheduledChanges: []headersToAdd{ + {header: chainA[3]}, + { + header: chainA[3], + wantErr: fmt.Errorf("cannot import scheduled change: %w: %s", + errDuplicateHashes, chainA[3].Hash()), + }, + }, + expectedRoots: 0, + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + // clear the scheduledChangeRoots after the test ends + // this does not cause race condition because t.Run without + // t.Parallel() blocks until this function returns + defer func() { + gs.scheduledChangeRoots = new(changeTree) + }() + + updateHighestFinalizedHeaderOrDefault(t, gs.blockState, tt.highestFinalizedHeader, chainA[0]) + + for _, entry := range tt.headersWithScheduledChanges { + err := gs.addScheduledChange(entry.header, *scheduledChange) + + if entry.wantErr != nil { + require.Error(t, err) + require.EqualError(t, err, entry.wantErr.Error()) + return + } + + require.NoError(t, err) + } + + require.Len(t, *gs.scheduledChangeRoots, tt.expectedRoots) + + for _, root := range *gs.scheduledChangeRoots { + parentHash := root.change.announcingHeader.Hash() + assertDescendantChildren(t, parentHash, gs.blockState.IsDescendantOf, root.nodes) + } + }) + } +} + +func assertDescendantChildren(t *testing.T, parentHash common.Hash, isDescendantOfFunc isDescendantOfFunc, + changes changeTree) { + t.Helper() + + for _, scheduled := range changes { + scheduledChangeHash := scheduled.change.announcingHeader.Hash() + isDescendant, err := isDescendantOfFunc(parentHash, scheduledChangeHash) + require.NoError(t, err) + require.Truef(t, isDescendant, "%s is not descendant of %s", scheduledChangeHash, parentHash) + + assertDescendantChildren(t, scheduledChangeHash, isDescendantOfFunc, scheduled.nodes) + } +} + +// updateHighestFinalizedHeaderOrDefault will update the current highest finalized header +// with the value of newHighest, if the newHighest is nil then it will use the def value +func updateHighestFinalizedHeaderOrDefault(t *testing.T, bs *BlockState, newHighest, def *types.Header) { + t.Helper() + + round, setID, err := bs.GetHighestRoundAndSetID() + require.NoError(t, err) + + if newHighest != nil { + bs.db.Put(finalisedHashKey(round, setID), newHighest.Hash().ToBytes()) + } else { + bs.db.Put(finalisedHashKey(round, setID), def.Hash().ToBytes()) + } +} + +func TestForcedScheduledChangesOrder(t *testing.T) { + t.Parallel() + + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + gs, err := NewGrandpaStateFromGenesis(db, blockState, nil) + require.NoError(t, err) + + aliceHeaders := issueBlocksWithBABEPrimary(t, 
keyring.KeyAlice, gs.blockState, + testGenesisHeader, 5) + + bobHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyBob, gs.blockState, + aliceHeaders[1], 5) + + charlieHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, gs.blockState, + aliceHeaders[2], 6) + + forcedChanges := map[*types.Header]types.GrandpaForcedChange{ + bobHeaders[1]: { + Delay: 1, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + aliceHeaders[3]: { + Delay: 5, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + charlieHeaders[4]: { + Delay: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + } + + for header, fc := range forcedChanges { + err := gs.addForcedChange(header, fc) + require.NoError(t, err, "failed to add forced change") + } + + forcedChangesSlice := *gs.forcedChanges + for idx := 0; idx < gs.forcedChanges.Len()-1; idx++ { + currentChange := forcedChangesSlice[idx] + nextChange := forcedChangesSlice[idx+1] + + require.LessOrEqual(t, currentChange.effectiveNumber(), + nextChange.effectiveNumber()) + + require.LessOrEqual(t, currentChange.announcingHeader.Number, + nextChange.announcingHeader.Number) + } +} + +func TestShouldNotAddMoreThanOneForcedChangeInTheSameFork(t *testing.T) { + t.Parallel() + + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + gs, err := NewGrandpaStateFromGenesis(db, blockState, nil) + require.NoError(t, err) + + aliceHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, gs.blockState, + testGenesisHeader, 5) + + bobHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyBob, gs.blockState, + aliceHeaders[1], 5) + + someForcedChange := types.GrandpaForcedChange{ + Delay: 1, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + } + + // adding more than one forced changes in the same branch + err = gs.addForcedChange(aliceHeaders[3], someForcedChange) + require.NoError(t, err) + + err = gs.addForcedChange(aliceHeaders[4], someForcedChange) + require.Error(t, err) + require.ErrorIs(t, err, errAlreadyHasForcedChange) + + // adding the same forced change twice + err = gs.addForcedChange(bobHeaders[2], someForcedChange) + require.NoError(t, err) + + err = gs.addForcedChange(bobHeaders[2], someForcedChange) + require.Error(t, err) + require.ErrorIs(t, err, errDuplicateHashes) +} + +func issueBlocksWithBABEPrimary(t *testing.T, kp *sr25519.Keypair, + bs *BlockState, parentHeader *types.Header, size int) (headers []*types.Header) { + t.Helper() + + transcript := merlin.NewTranscript("BABE") + crypto.AppendUint64(transcript, []byte("slot number"), 1) + crypto.AppendUint64(transcript, []byte("current epoch"), 1) + transcript.AppendMessage([]byte("chain randomness"), []byte{}) + + output, proof, err := 
kp.VrfSign(transcript) + require.NoError(t, err) + + babePrimaryPreDigest := types.BabePrimaryPreDigest{ + SlotNumber: 1, + VRFOutput: output, + VRFProof: proof, + } + + preRuntimeDigest, err := babePrimaryPreDigest.ToPreRuntimeDigest() + require.NoError(t, err) + + digest := types.NewDigest() + + require.NoError(t, digest.Add(*preRuntimeDigest)) + header := &types.Header{ + ParentHash: parentHeader.Hash(), + Number: parentHeader.Number + 1, + Digest: digest, + } + + block := &types.Block{ + Header: *header, + Body: *types.NewBody([]types.Extrinsic{}), + } + + err = bs.AddBlock(block) + require.NoError(t, err) + + if size <= 0 { + headers = append(headers, header) + return headers + } + + headers = append(headers, header) + headers = append(headers, issueBlocksWithBABEPrimary(t, kp, bs, header, size-1)...) + return headers +} + +func TestNextGrandpaAuthorityChange(t *testing.T) { + t.Parallel() + + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + tests := map[string]struct { + forcedChange *types.GrandpaForcedChange + forcedChangeAnnoucingIndex int + + scheduledChange *types.GrandpaScheduledChange + scheduledChangeAnnoucingIndex int + + wantErr error + expectedBlockNumber uint + }{ + "no_forced_change_no_scheduled_change": { + wantErr: ErrNoNextAuthorityChange, + }, + "only_forced_change": { + forcedChangeAnnoucingIndex: 2, // in the chain headers slice the index 2 == block number 3 + forcedChange: &types.GrandpaForcedChange{ + Delay: 2, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 5, + }, + "only_scheduled_change": { + scheduledChangeAnnoucingIndex: 3, // in the chain headers slice the index 3 == block number 4 + scheduledChange: &types.GrandpaScheduledChange{ + Delay: 4, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 8, + }, + "forced_change_before_scheduled_change": { + forcedChangeAnnoucingIndex: 2, // in the chain headers slice the index 2 == block number 3 + forcedChange: &types.GrandpaForcedChange{ + Delay: 2, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + scheduledChangeAnnoucingIndex: 3, // in the chain headers slice the index 3 == block number 4 + scheduledChange: &types.GrandpaScheduledChange{ + Delay: 4, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 5, // forced change occurs before the scheduled change + }, + "scheduled_change_before_forced_change": { + scheduledChangeAnnoucingIndex: 3, // in the chain headers slice the index 3 == block number 4 + scheduledChange: &types.GrandpaScheduledChange{ + Delay: 4, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + 
{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + forcedChangeAnnoucingIndex: 8, // in the chain headers slice the index 8 == block number 9 + forcedChange: &types.GrandpaForcedChange{ + Delay: 1, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + expectedBlockNumber: 8, // scheduled change occurs before the forced change + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + gs, err := NewGrandpaStateFromGenesis(db, blockState, nil) + require.NoError(t, err) + + const sizeOfChain = 10 + + chainHeaders := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, gs.blockState, + testGenesisHeader, sizeOfChain) + + if tt.forcedChange != nil { + gs.addForcedChange(chainHeaders[tt.forcedChangeAnnoucingIndex], + *tt.forcedChange) + } + + if tt.scheduledChange != nil { + gs.addScheduledChange(chainHeaders[tt.scheduledChangeAnnoucingIndex], + *tt.scheduledChange) + } + + lastBlockOnChain := chainHeaders[sizeOfChain] + blockNumber, err := gs.NextGrandpaAuthorityChange(lastBlockOnChain.Hash(), lastBlockOnChain.Number) + + if tt.wantErr != nil { + require.Error(t, err) + require.EqualError(t, err, tt.wantErr.Error()) + require.Zero(t, blockNumber) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedBlockNumber, blockNumber) + } + }) + } +} + +func TestApplyForcedChanges(t *testing.T) { + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + wantErr error + // 2 index array where the 0 index describes the fork and the 1 index describes the header + importedHeader [2]int + expectedGRANDPAAuthoritySet []types.GrandpaAuthoritiesRaw + expectedSetID uint64 + + generateForks func(t *testing.T, blockState *BlockState) [][]*types.Header + changes func(*GrandpaState, [][]*types.Header) + }{ + "no_forced_changes": { + generateForks: genericForks, + importedHeader: [2]int{0, 3}, // chain A from and header number 4 + expectedSetID: 0, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + }, + "apply_forced_change_without_pending_scheduled_changes": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers 
[][]*types.Header) { + chainABlock8 := headers[0][7] + gs.addForcedChange(chainABlock8, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainCBlock15 := headers[2][8] + gs.addForcedChange(chainCBlock15, types.GrandpaForcedChange{ + Delay: 1, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + importedHeader: [2]int{0, 9}, // import block number 10 from fork A + expectedSetID: 1, + expectedGRANDPAAuthoritySet: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }, + "import_block_before_forced_change_should_do_nothing": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainCBlock9 := headers[2][2] + gs.addForcedChange(chainCBlock9, types.GrandpaForcedChange{ + Delay: 3, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + importedHeader: [2]int{2, 1}, // import block number 7 from chain C + expectedSetID: 0, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + }, + "import_block_from_another_fork_should_do_nothing": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainCBlock9 := headers[2][2] + gs.addForcedChange(chainCBlock9, types.GrandpaForcedChange{ + Delay: 3, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + importedHeader: [2]int{1, 9}, // import block number 12 from chain B + expectedSetID: 0, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + }, + "apply_forced_change_with_pending_scheduled_changes_should_fail": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainBBlock6 := headers[1][3] + gs.addScheduledChange(chainBBlock6, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainCBlock9 := headers[2][2] + gs.addForcedChange(chainCBlock9, types.GrandpaForcedChange{ + Delay: 3, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock9 := headers[1][6] + gs.addForcedChange(chainBBlock9, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 6, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: 
keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + importedHeader: [2]int{1, 8}, // block number 11 imported + wantErr: errPendingScheduledChanges, + expectedGRANDPAAuthoritySet: genesisGrandpaVoters, + expectedSetID: 0, + }, + } + + for tname, tt := range tests { + tt := tt + + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + voters := types.NewGrandpaVotersFromAuthorities(genesisAuths) + gs, err := NewGrandpaStateFromGenesis(db, blockState, voters) + require.NoError(t, err) + + forks := tt.generateForks(t, blockState) + if tt.changes != nil { + tt.changes(gs, forks) + } + + selectedFork := forks[tt.importedHeader[0]] + selectedImportedHeader := selectedFork[tt.importedHeader[1]] + + err = gs.ApplyForcedChanges(selectedImportedHeader) + if tt.wantErr != nil { + require.Error(t, err) + require.ErrorIs(t, err, tt.wantErr) + } else { + require.NoError(t, err) + } + + currentSetID, err := gs.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, tt.expectedSetID, currentSetID) + + expectedAuths, err := types.GrandpaAuthoritiesRawToAuthorities(tt.expectedGRANDPAAuthoritySet) + require.NoError(t, err) + expectedVoters := types.NewGrandpaVotersFromAuthorities(expectedAuths) + + gotVoters, err := gs.GetAuthorities(tt.expectedSetID) + require.NoError(t, err) + + require.Equal(t, expectedVoters, gotVoters) + }) + } +} + +func TestApplyScheduledChangesKeepDescendantForcedChanges(t *testing.T) { + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + finalizedHeader [2]int // 2 index array where the 0 index describes the fork and the 1 index describes the header + + generateForks func(*testing.T, *BlockState) [][]*types.Header + changes func(*GrandpaState, [][]*types.Header) + + wantErr error + + expectedForcedChangesLen int + }{ + "no_forced_changes": { + generateForks: genericForks, + expectedForcedChangesLen: 0, + }, + "finalized_hash_should_keep_descendant_forced_changes": { + generateForks: genericForks, + expectedForcedChangesLen: 1, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock6 := headers[0][5] + gs.addForcedChange(chainABlock6, types.GrandpaForcedChange{ + Delay: 1, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: 
keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock6 := headers[1][3] + gs.addForcedChange(chainBBlock6, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{0, 3}, //finalize header number 4 from chain A + }, + } + + for tname, tt := range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + voters := types.NewGrandpaVotersFromAuthorities(genesisAuths) + gs, err := NewGrandpaStateFromGenesis(db, blockState, voters) + require.NoError(t, err) + + forks := tt.generateForks(t, gs.blockState) + + if tt.changes != nil { + tt.changes(gs, forks) + } + + selectedFork := forks[tt.finalizedHeader[0]] + selectedFinalizedHeader := selectedFork[tt.finalizedHeader[1]] + + err = gs.forcedChanges.pruneChanges(selectedFinalizedHeader.Hash(), gs.blockState.IsDescendantOf) + if tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + + require.Len(t, *gs.forcedChanges, tt.expectedForcedChangesLen) + + for _, forcedChange := range *gs.forcedChanges { + isDescendant, err := gs.blockState.IsDescendantOf( + selectedFinalizedHeader.Hash(), forcedChange.announcingHeader.Hash()) + + require.NoError(t, err) + require.True(t, isDescendant) + } + } + }) + } +} + +func TestApplyScheduledChangeGetApplicableChange(t *testing.T) { + keyring, err := keystore.NewSr25519Keyring() + require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + finalizedHeader [2]int + changes func(*GrandpaState, [][]*types.Header) + generateForks func(*testing.T, *BlockState) [][]*types.Header + wantErr error + expectedChange *pendingChange + expectedScheduledChangeRootsLen int + }{ + "empty_scheduled_changes": { + generateForks: genericForks, + finalizedHeader: [2]int{0, 1}, // finalized block from chainA header number 2 + }, + "scheduled_change_being_finalized_should_be_applied": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + 
chainABlock6 := headers[0][5]
+				gs.addScheduledChange(chainABlock6, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+			},
+			expectedChange: &pendingChange{
+				delay: 0,
+				nextAuthorities: func() []types.Authority {
+					auths, _ := types.GrandpaAuthoritiesRawToAuthorities(
+						[]types.GrandpaAuthoritiesRaw{
+							{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+							{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+							{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+						},
+					)
+					return auths
+				}(),
+			},
+			finalizedHeader: [2]int{0, 5}, // finalize block number 6 from chain A
+		},
+		"apply_change_and_update_scheduled_changes_with_the_children": {
+			generateForks: genericForks,
+			changes: func(gs *GrandpaState, headers [][]*types.Header) {
+				chainBBlock4 := headers[1][1] // block number 4 from chain B
+				gs.addScheduledChange(chainBBlock4, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyFerdie.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyGeorge.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+
+				chainBBlock7 := headers[1][4] // block number 7 from chain B
+				gs.addScheduledChange(chainBBlock7, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+
+				chainCBlock7 := headers[2][0] // block number 7 from chain C
+				gs.addScheduledChange(chainCBlock7, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+			},
+			finalizedHeader:                 [2]int{1, 1}, // finalize block number 4 from chain B
+			expectedScheduledChangeRootsLen: 1,
+			expectedChange: &pendingChange{
+				delay: 0,
+				nextAuthorities: func() []types.Authority {
+					auths, _ := types.GrandpaAuthoritiesRawToAuthorities(
+						[]types.GrandpaAuthoritiesRaw{
+							{Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()},
+							{Key: keyring.KeyFerdie.Public().(*sr25519.PublicKey).AsBytes()},
+							{Key: keyring.KeyGeorge.Public().(*sr25519.PublicKey).AsBytes()},
+						},
+					)
+					return auths
+				}(),
+			},
+		},
+		"finalized_header_with_no_scheduled_change_should_purge_other_pending_changes": {
+			generateForks:                   genericForks,
+			expectedScheduledChangeRootsLen: 1,
+			changes: func(gs *GrandpaState, headers [][]*types.Header) {
+				chainABlock8 := headers[0][7] // block 8 from chain A should be kept
+				gs.addScheduledChange(chainABlock8, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+
+				chainBBlock9 := headers[1][6] // block 9 from chain B should be pruned
+				gs.addScheduledChange(chainBBlock9, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+
+				chainCBlock8 := headers[2][1] // block 8 from chain C should be pruned
+				gs.addScheduledChange(chainCBlock8, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+			},
+			finalizedHeader: [2]int{0, 6}, // finalize block number 7 from chain A
+		},
+		"finalising_header_with_pending_changes_should_return_unfinalized_ancestor": {
+			generateForks: genericForks,
+			changes: func(gs *GrandpaState, headers [][]*types.Header) {
+				chainABlock4 := headers[0][3] // block 4 from chain A
+				gs.addScheduledChange(chainABlock4, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+
+				// the change on block 6 from chain A should be a child
+				// node of the scheduled change on block 4 from chain A
+				chainABlock6 := headers[0][5]
+				gs.addScheduledChange(chainABlock6, types.GrandpaScheduledChange{
+					Delay: 0,
+					Auths: []types.GrandpaAuthoritiesRaw{
+						{Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()},
+						{Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()},
+					},
+				})
+			},
+			finalizedHeader: [2]int{0, 6}, // finalize block number 7 from chain A
+			wantErr:         fmt.Errorf("failed while applying condition: %w", errUnfinalizedAncestor),
+		},
+	}
+
+	for tname, tt := range tests {
+		tt := tt
+		t.Run(tname, func(t *testing.T) {
+			t.Parallel()
+
+			db := NewInMemoryDB(t)
+			blockState := testBlockState(t, db)
+
+			voters := types.NewGrandpaVotersFromAuthorities(genesisAuths)
+			gs, err := NewGrandpaStateFromGenesis(db, blockState, voters)
+			require.NoError(t, err)
+
+			forks := tt.generateForks(t, gs.blockState)
+
+			if tt.changes != nil {
+				tt.changes(gs, forks)
+			}
+
+			// saving the current state of scheduled changes to compare
+			// with the next state in the case of an error (it should stay the same)
+			previousScheduledChanges := gs.scheduledChangeRoots
+
+			selectedChain := forks[tt.finalizedHeader[0]]
+			selectedHeader := selectedChain[tt.finalizedHeader[1]]
+
+			changeNode, err := gs.scheduledChangeRoots.findApplicable(selectedHeader.Hash(),
+				selectedHeader.Number, gs.blockState.IsDescendantOf)
+			if tt.wantErr != nil {
+				require.EqualError(t, err, tt.wantErr.Error())
+				require.Equal(t, previousScheduledChanges, gs.scheduledChangeRoots)
+				return
+			}
+
+			if tt.expectedChange != nil {
+				require.NoError(t, err)
+				require.Equal(t, tt.expectedChange.delay, changeNode.change.delay)
+				require.Equal(t, tt.expectedChange.nextAuthorities, changeNode.change.nextAuthorities)
+			} else {
+				require.Nil(t, changeNode)
+			}
+
+			require.Len(t, *gs.scheduledChangeRoots, tt.expectedScheduledChangeRootsLen)
+			// make sure all the next scheduled changes are descendants of the finalized hash
+			assertDescendantChildren(t,
+				selectedHeader.Hash(), gs.blockState.IsDescendantOf, *gs.scheduledChangeRoots)
+		})
+	}
+}
+
+func TestApplyScheduledChange(t *testing.T) {
+	keyring, err := keystore.NewSr25519Keyring()
require.NoError(t, err) + + genesisGrandpaVoters := []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + } + + const sizeOfChain = 10 + genericForks := func(t *testing.T, blockState *BlockState) [][]*types.Header { + /* + * create chainA and two forks: chainB and chainC + * + * / -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 (B) + * 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 (A) + * \ -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13 -> 14 -> 15 -> 16 (C) + */ + chainA := issueBlocksWithBABEPrimary(t, keyring.KeyAlice, blockState, testGenesisHeader, sizeOfChain) + chainB := issueBlocksWithBABEPrimary(t, keyring.KeyBob, blockState, chainA[1], sizeOfChain) + chainC := issueBlocksWithBABEPrimary(t, keyring.KeyCharlie, blockState, chainA[5], sizeOfChain) + + return [][]*types.Header{ + chainA, chainB, chainC, + } + } + + tests := map[string]struct { + finalizedHeader [2]int // 2 index array where the 0 index describes the fork and the 1 index describes the header + + generateForks func(*testing.T, *BlockState) [][]*types.Header + changes func(*GrandpaState, [][]*types.Header) + + wantErr error + expectedScheduledChangeRootsLen int + expectedForcedChangesLen int + expectedSetID uint64 + expectedAuthoritySet []types.GrandpaVoter + changeSetIDAt uint + }{ + "empty_scheduled_changes_only_update_the_forced_changes": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock6 := headers[0][5] // block number 6 from chain A + gs.addForcedChange(chainABlock6, types.GrandpaForcedChange{ + Delay: 1, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock6 := headers[1][3] // block number 6 from chain B + gs.addForcedChange(chainBBlock6, types.GrandpaForcedChange{ + Delay: 2, + BestFinalizedBlock: 3, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyCharlie.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyDave.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{0, 3}, + expectedForcedChangesLen: 1, + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + "pending_scheduled_changes_should_return_error": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainABlock4 := headers[0][3] // block 4 from chain A + gs.addScheduledChange(chainABlock4, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + // change on block 5 from chain A should be a child + // node of scheduled change on block 4 from chain A + chainABlock5 := headers[0][5] + gs.addScheduledChange(chainABlock5, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: 
keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{0, 6}, // finalize block number 7 from chain A + wantErr: fmt.Errorf( + "cannot get applicable scheduled change: failed while applying condition: %w", errUnfinalizedAncestor), + expectedScheduledChangeRootsLen: 1, // expected one root len as the second change is a child + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + "no_changes_to_apply_should_only_update_the_scheduled_roots": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainBBlock6 := headers[1][3] // block 6 from chain B + gs.addScheduledChange(chainBBlock6, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock8 := headers[1][5] // block number 8 from chain B + gs.addScheduledChange(chainBBlock8, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{2, 1}, // finalize block number 8 from chain C + expectedScheduledChangeRootsLen: 0, + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + "apply_scheduled_change_should_change_voters_and_set_id": { + generateForks: genericForks, + changes: func(gs *GrandpaState, headers [][]*types.Header) { + chainBBlock6 := headers[1][3] // block 6 from chain B + gs.addScheduledChange(chainBBlock6, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + + chainBBlock8 := headers[1][5] // block number 8 from chain B + err = gs.addScheduledChange(chainBBlock8, types.GrandpaScheduledChange{ + Delay: 0, + Auths: []types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyBob.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }, + }) + }, + finalizedHeader: [2]int{1, 3}, // finalize block number 6 from chain B + // the child (block number 8 from chain B) should be the next scheduled change root + expectedScheduledChangeRootsLen: 1, + expectedSetID: 1, + changeSetIDAt: 6, + expectedAuthoritySet: func() []types.GrandpaVoter { + auths, _ := types.GrandpaAuthoritiesRawToAuthorities([]types.GrandpaAuthoritiesRaw{ + {Key: keyring.KeyAlice.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyIan.Public().(*sr25519.PublicKey).AsBytes()}, + {Key: keyring.KeyEve.Public().(*sr25519.PublicKey).AsBytes()}, + }) + return types.NewGrandpaVotersFromAuthorities(auths) + }(), + }, + } + + for tname, tt := 
range tests { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + db := NewInMemoryDB(t) + blockState := testBlockState(t, db) + + genesisAuths, err := types.GrandpaAuthoritiesRawToAuthorities(genesisGrandpaVoters) + require.NoError(t, err) + + voters := types.NewGrandpaVotersFromAuthorities(genesisAuths) + gs, err := NewGrandpaStateFromGenesis(db, blockState, voters) + require.NoError(t, err) + + forks := tt.generateForks(t, gs.blockState) + + if tt.changes != nil { + tt.changes(gs, forks) + } + + selectedFork := forks[tt.finalizedHeader[0]] + selectedFinalizedHeader := selectedFork[tt.finalizedHeader[1]] + + err = gs.ApplyScheduledChanges(selectedFinalizedHeader) + if tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + + // ensure the forced changes and scheduled changes + // are descendant of the latest finalized header + forcedChangeSlice := *gs.forcedChanges + for _, forcedChange := range forcedChangeSlice { + isDescendant, err := gs.blockState.IsDescendantOf( + selectedFinalizedHeader.Hash(), forcedChange.announcingHeader.Hash()) + + require.NoError(t, err) + require.True(t, isDescendant) + } + + assertDescendantChildren(t, + selectedFinalizedHeader.Hash(), gs.blockState.IsDescendantOf, *gs.scheduledChangeRoots) + } + + require.Len(t, *gs.forcedChanges, tt.expectedForcedChangesLen) + require.Len(t, *gs.scheduledChangeRoots, tt.expectedScheduledChangeRootsLen) + + currentSetID, err := gs.GetCurrentSetID() + require.NoError(t, err) + require.Equal(t, tt.expectedSetID, currentSetID) + + currentVoters, err := gs.GetAuthorities(currentSetID) + require.NoError(t, err) + require.Equal(t, tt.expectedAuthoritySet, currentVoters) + + blockNumber, err := gs.GetSetIDChange(currentSetID) + require.NoError(t, err) + require.Equal(t, tt.changeSetIDAt, blockNumber) + }) + } +} diff --git a/dot/state/initialize.go b/dot/state/initialize.go index 3769303423..cd820a71a9 100644 --- a/dot/state/initialize.go +++ b/dot/state/initialize.go @@ -89,7 +89,7 @@ func (s *Service) Initialise(gen *genesis.Genesis, header *types.Header, t *trie return fmt.Errorf("failed to load grandpa authorities: %w", err) } - grandpaState, err := NewGrandpaStateFromGenesis(db, grandpaAuths) + grandpaState, err := NewGrandpaStateFromGenesis(db, blockState, grandpaAuths) if err != nil { return fmt.Errorf("failed to create grandpa state: %s", err) } diff --git a/dot/state/service.go b/dot/state/service.go index b425fa163b..6794fee251 100644 --- a/dot/state/service.go +++ b/dot/state/service.go @@ -159,11 +159,7 @@ func (s *Service) Start() error { return fmt.Errorf("failed to create epoch state: %w", err) } - s.Grandpa, err = NewGrandpaState(s.db) - if err != nil { - return fmt.Errorf("failed to create grandpa state: %w", err) - } - + s.Grandpa = NewGrandpaState(s.db, s.Block) num, _ := s.Block.BestBlockNumber() logger.Infof( "created state service with head %s, highest number %d and genesis hash %s", diff --git a/dot/state/service_test.go b/dot/state/service_test.go index 7782a88047..d45ae15fe9 100644 --- a/dot/state/service_test.go +++ b/dot/state/service_test.go @@ -316,13 +316,13 @@ func TestService_Rewind(t *testing.T) { err = serv.Grandpa.setCurrentSetID(3) require.NoError(t, err) - err = serv.Grandpa.setSetIDChangeAtBlock(1, 5) + err = serv.Grandpa.setChangeSetIDAtBlock(1, 5) require.NoError(t, err) - err = serv.Grandpa.setSetIDChangeAtBlock(2, 8) + err = serv.Grandpa.setChangeSetIDAtBlock(2, 8) require.NoError(t, err) - err = 
serv.Grandpa.setSetIDChangeAtBlock(3, 10) + err = serv.Grandpa.setChangeSetIDAtBlock(3, 10) require.NoError(t, err) AddBlocksToState(t, serv.Block, 12, false) diff --git a/dot/types/consensus_digest.go b/dot/types/consensus_digest.go index 0d704e964d..b33e884555 100644 --- a/dot/types/consensus_digest.go +++ b/dot/types/consensus_digest.go @@ -31,8 +31,12 @@ func (sc GrandpaScheduledChange) Index() uint { return 1 } // GrandpaForcedChange represents a GRANDPA forced authority change type GrandpaForcedChange struct { - Auths []GrandpaAuthoritiesRaw - Delay uint32 + // BestFinalizedBlock is specified by the governance mechanism, defines + // the starting block at which Delay is applied. + // https://github.com/w3f/polkadot-spec/pull/506#issuecomment-1128849492 + BestFinalizedBlock uint32 + Auths []GrandpaAuthoritiesRaw + Delay uint32 } // Index Returns VDT index diff --git a/dot/types/digest.go b/dot/types/digest.go index e0796dcca1..4624d25837 100644 --- a/dot/types/digest.go +++ b/dot/types/digest.go @@ -29,6 +29,10 @@ func (h ConsensusEngineID) ToBytes() []byte { return b[:] } +func (h ConsensusEngineID) String() string { + return fmt.Sprintf("0x%x", h.ToBytes()) +} + // BabeEngineID is the hard-coded babe ID var BabeEngineID = ConsensusEngineID{'B', 'A', 'B', 'E'} diff --git a/go.mod b/go.mod index 548e37306b..a674ad08ec 100644 --- a/go.mod +++ b/go.mod @@ -55,6 +55,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd v0.22.0-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.0 github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect @@ -158,7 +159,7 @@ require ( github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect - github.com/rs/cors v1.7.0 // indirect + github.com/rs/cors v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect diff --git a/go.sum b/go.sum index b424162b7a..da6ad2aeb4 100644 --- a/go.sum +++ b/go.sum @@ -155,6 +155,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/centrifuge/go-substrate-rpc-client/v3 v3.0.2 h1:SQNaOeTmW2y2fmJgR5a7KIozjaOYi34GxafQ4efGc5U= github.com/centrifuge/go-substrate-rpc-client/v3 v3.0.2/go.mod h1:ZYSX8OuIJgZ9aVdKLhIi1G4Rj42Ys4nZNsWW70yfCJc= +github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.0 h1:/t8Aw7d3rCu1uqYFFG2JIoYK/W6/Af5C1+WNF6XyYL8= +github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.0/go.mod h1:MDzvG8lkzMGRaO4qzvxdfJtlDtukRPqNVWG9HJybVt0= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -285,6 +287,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod 
h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= @@ -306,8 +310,10 @@ github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= @@ -632,6 +638,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -890,6 +897,7 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= @@ -1149,8 +1157,9 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod 
h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1251,7 +1260,9 @@ github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoi github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc h1:RTUQlKzoZZVG3umWNzOYeFecQLIh+dbxXvJp1zPQJTI= github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc/go.mod h1:NoCfSFWosfqMqmmD7hApkirIK9ozpHjxRnRxs1l413A= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.9 h1:cv3/KhXGBGjEXLC4bH0sLuJ9BewaAbpk5oyMOveu4pw= @@ -1508,6 +1519,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1776,6 +1788,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= diff --git a/lib/grandpa/grandpa.go b/lib/grandpa/grandpa.go index ffc2a01d9c..058ce2117b 100644 --- a/lib/grandpa/grandpa.go +++ b/lib/grandpa/grandpa.go @@ -13,6 +13,7 @@ 
import ( "sync/atomic" "time" + "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" @@ -46,7 +47,6 @@ type Service struct { cancel context.CancelFunc blockState BlockState grandpaState GrandpaState - digestHandler DigestHandler keypair *ed25519.Keypair // TODO: change to grandpa keystore (#1870) mapLock sync.Mutex chanLock sync.Mutex @@ -83,16 +83,15 @@ type Service struct { // Config represents a GRANDPA service configuration type Config struct { - LogLvl log.Level - BlockState BlockState - GrandpaState GrandpaState - DigestHandler DigestHandler - Network Network - Voters []Voter - Keypair *ed25519.Keypair - Authority bool - Interval time.Duration - Telemetry telemetry.Client + LogLvl log.Level + BlockState BlockState + GrandpaState GrandpaState + Network Network + Voters []Voter + Keypair *ed25519.Keypair + Authority bool + Interval time.Duration + Telemetry telemetry.Client } // NewService returns a new GRANDPA Service instance. @@ -105,10 +104,6 @@ func NewService(cfg *Config) (*Service, error) { return nil, ErrNilGrandpaState } - if cfg.DigestHandler == nil { - return nil, ErrNilDigestHandler - } - if cfg.Keypair == nil && cfg.Authority { return nil, ErrNilKeypair } @@ -157,7 +152,6 @@ func NewService(cfg *Config) (*Service, error) { state: NewState(cfg.Voters, setID, round), blockState: cfg.BlockState, grandpaState: cfg.GrandpaState, - digestHandler: cfg.DigestHandler, keypair: cfg.Keypair, authority: cfg.Authority, prevotes: new(sync.Map), @@ -244,7 +238,7 @@ func (s *Service) authorities() []*types.Authority { func (s *Service) updateAuthorities() error { currSetID, err := s.grandpaState.GetCurrentSetID() if err != nil { - return err + return fmt.Errorf("cannot get current set id: %w", err) } // set ID hasn't changed, do nothing @@ -254,7 +248,7 @@ func (s *Service) updateAuthorities() error { nextAuthorities, err := s.grandpaState.GetAuthorities(currSetID) if err != nil { - return err + return fmt.Errorf("cannot get authorities for set id %d: %w", currSetID, err) } s.state.voters = nextAuthorities @@ -300,12 +294,12 @@ func (s *Service) initiateRound() error { // if there is an authority change, execute it err := s.updateAuthorities() if err != nil { - return err + return fmt.Errorf("cannot update authorities while initiating the round: %w", err) } round, setID, err := s.blockState.GetHighestRoundAndSetID() if err != nil { - return err + return fmt.Errorf("cannot get highest round and set id: %w", err) } if round > s.state.round && setID == s.state.setID { @@ -518,7 +512,7 @@ func (s *Service) playGrandpaRound() error { go s.sendVoteMessage(prevote, vm, roundComplete) logger.Debug("receiving pre-commit messages...") - // through goroutine s.receiveMessages(ctx) + // through goroutine s.receiveVoteMessages(ctx) time.Sleep(s.interval) if s.paused.Load().(bool) { @@ -689,6 +683,11 @@ func (s *Service) deleteVote(key ed25519.PublicKeyBytes, stage Subround) { func (s *Service) determinePreVote() (*Vote, error) { var vote *Vote + bestBlockHeader, err := s.blockState.BestBlockHeader() + if err != nil { + return nil, fmt.Errorf("cannot get best block header: %w", err) + } + // if we receive a vote message from the primary with a // block that's greater than or equal to the current pre-voted block // and greater than the best final candidate from the last round, we choose that. 
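The selection rule described in the comment above can be condensed into a small sketch. This is an illustration only, not part of the patch: the helper name choosePreVote and its parameters are hypothetical, while Vote, types.Header and NewVoteFromHeader are the types and constructor used by the surrounding code.

// choosePreVote sketches the pre-vote rule, mirroring the check visible in
// the hunk below (prm.Vote.Number >= uint32(s.head.Number)): prefer the
// primary's vote when its block number is at least the current head's;
// otherwise fall back to a vote for the best block header. The full rule
// also requires the primary's block to exceed the last round's best final
// candidate. (Hypothetical helper, for illustration only.)
func choosePreVote(primaryVote *Vote, headNumber uint, bestHeader *types.Header) *Vote {
	if primaryVote != nil && primaryVote.Number >= uint32(headNumber) {
		return primaryVote
	}
	return NewVoteFromHeader(bestHeader)
}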
@@ -698,15 +697,16 @@ func (s *Service) determinePreVote() (*Vote, error) { if has && prm.Vote.Number >= uint32(s.head.Number) { vote = &prm.Vote } else { - header, err := s.blockState.BestBlockHeader() - if err != nil { - return nil, err - } + vote = NewVoteFromHeader(bestBlockHeader) + } - vote = NewVoteFromHeader(header) + nextChange, err := s.grandpaState.NextGrandpaAuthorityChange(bestBlockHeader.Hash(), bestBlockHeader.Number) + if errors.Is(err, state.ErrNoNextAuthorityChange) { + return vote, nil + } else if err != nil { + return nil, fmt.Errorf("cannot get next grandpa authority change: %w", err) } - nextChange := s.digestHandler.NextGrandpaAuthorityChange() if uint(vote.Number) > nextChange { header, err := s.blockState.GetHeaderByNumber(nextChange) if err != nil { @@ -730,7 +730,18 @@ func (s *Service) determinePreCommit() (*Vote, error) { s.preVotedBlock[s.state.round] = &pvb s.mapLock.Unlock() - nextChange := s.digestHandler.NextGrandpaAuthorityChange() + bestBlockHeader, err := s.blockState.BestBlockHeader() + if err != nil { + return nil, fmt.Errorf("cannot retrieve best block header: %w", err) + } + + nextChange, err := s.grandpaState.NextGrandpaAuthorityChange(bestBlockHeader.Hash(), bestBlockHeader.Number) + if errors.Is(err, state.ErrNoNextAuthorityChange) { + return &pvb, nil + } else if err != nil { + return nil, fmt.Errorf("cannot get next grandpa authority change: %w", err) + } + if uint(pvb.Number) > nextChange { header, err := s.blockState.GetHeaderByNumber(nextChange) if err != nil { diff --git a/lib/grandpa/grandpa_test.go b/lib/grandpa/grandpa_test.go index 4aaefed652..6905755406 100644 --- a/lib/grandpa/grandpa_test.go +++ b/lib/grandpa/grandpa_test.go @@ -22,8 +22,6 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" - - "github.com/ChainSafe/gossamer/lib/grandpa/mocks" ) // testGenesisHeader is a test block header @@ -38,12 +36,6 @@ var ( voters = newTestVoters() ) -func NewMockDigestHandler() *mocks.DigestHandler { - m := new(mocks.DigestHandler) - m.On("NextGrandpaAuthorityChange").Return(uint(2 ^ 64 - 1)) - return m -} - //go:generate mockgen -destination=mock_telemetry_test.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/telemetry Client func newTestState(t *testing.T) *state.Service { @@ -73,7 +65,7 @@ func newTestState(t *testing.T) *state.Service { require.NoError(t, err) block.StoreRuntime(block.BestBlockHash(), rt) - grandpa, err := state.NewGrandpaStateFromGenesis(db, voters) + grandpa, err := state.NewGrandpaStateFromGenesis(db, nil, voters) require.NoError(t, err) return &state.Service{ @@ -104,15 +96,14 @@ func newTestService(t *testing.T) (*Service, *state.Service) { telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Alice().(*ed25519.Keypair), - Authority: true, - Network: net, - Interval: time.Second, - Telemetry: telemetryMock, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Alice().(*ed25519.Keypair), + Authority: true, + Network: net, + Interval: time.Second, + Telemetry: telemetryMock, } gs, err := NewService(cfg) diff --git a/lib/grandpa/mocks_test.go b/lib/grandpa/mocks_test.go index a91d5301de..97ba682105 100644 --- a/lib/grandpa/mocks_test.go +++ b/lib/grandpa/mocks_test.go @@ -495,6 +495,21 @@ func (mr *MockGrandpaStateMockRecorder) GetSetIDByBlockNumber(arg0 interface{}) return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSetIDByBlockNumber", reflect.TypeOf((*MockGrandpaState)(nil).GetSetIDByBlockNumber), arg0) } +// NextGrandpaAuthorityChange mocks base method. +func (m *MockGrandpaState) NextGrandpaAuthorityChange(arg0 common.Hash, arg1 uint) (uint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextGrandpaAuthorityChange", arg0, arg1) + ret0, _ := ret[0].(uint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NextGrandpaAuthorityChange indicates an expected call of NextGrandpaAuthorityChange. +func (mr *MockGrandpaStateMockRecorder) NextGrandpaAuthorityChange(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextGrandpaAuthorityChange", reflect.TypeOf((*MockGrandpaState)(nil).NextGrandpaAuthorityChange), arg0, arg1) +} + // SetLatestRound mocks base method. func (m *MockGrandpaState) SetLatestRound(arg0 uint64) error { m.ctrl.T.Helper() diff --git a/lib/grandpa/round_test.go b/lib/grandpa/round_test.go index d05508b5d6..2ed107a4cb 100644 --- a/lib/grandpa/round_test.go +++ b/lib/grandpa/round_test.go @@ -100,16 +100,15 @@ func setupGrandpa(t *testing.T, kp *ed25519.Keypair) ( SendMessage(gomock.Any()).AnyTimes() cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kp, - LogLvl: log.Info, - Authority: true, - Network: net, - Interval: time.Second, - Telemetry: telemetryMock, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kp, + LogLvl: log.Info, + Authority: true, + Network: net, + Interval: time.Second, + Telemetry: telemetryMock, } gs, err := NewService(cfg) diff --git a/lib/grandpa/state.go b/lib/grandpa/state.go index c6b546316b..15f85f7722 100644 --- a/lib/grandpa/state.go +++ b/lib/grandpa/state.go @@ -51,11 +51,7 @@ type GrandpaState interface { //nolint:revive SetPrecommits(round, setID uint64, data []SignedVote) error GetPrevotes(round, setID uint64) ([]SignedVote, error) GetPrecommits(round, setID uint64) ([]SignedVote, error) -} - -// DigestHandler is the interface required by GRANDPA for the digest handler -type DigestHandler interface { // TODO: use GrandpaState instead (#1871) - NextGrandpaAuthorityChange() uint + NextGrandpaAuthorityChange(bestBlockHash common.Hash, bestBlockNumber uint) (blockHeight uint, err error) } //go:generate mockery --name Network --structname Network --case underscore --keeptree diff --git a/lib/grandpa/vote_message_test.go b/lib/grandpa/vote_message_test.go index e1773613d7..4365e7a685 100644 --- a/lib/grandpa/vote_message_test.go +++ b/lib/grandpa/vote_message_test.go @@ -22,13 +22,12 @@ func TestCheckForEquivocation_NoEquivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -57,13 +56,12 @@ func TestCheckForEquivocation_WithEquivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, 
+ Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -103,13 +101,12 @@ func TestCheckForEquivocation_WithExistingEquivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -159,13 +156,12 @@ func TestValidateMessage_Valid(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -193,13 +189,12 @@ func TestValidateMessage_InvalidSignature(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -228,12 +223,11 @@ func TestValidateMessage_SetIDMismatch(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -262,13 +256,12 @@ func TestValidateMessage_Equivocation(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -306,13 +299,12 @@ func TestValidateMessage_BlockDoesNotExist(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) @@ -341,13 +333,12 @@ func TestValidateMessage_IsNotDescendant(t *testing.T) { require.NoError(t, err) cfg := &Config{ - BlockState: st.Block, - GrandpaState: st.Grandpa, - DigestHandler: NewMockDigestHandler(), - Voters: voters, - Keypair: kr.Bob().(*ed25519.Keypair), - Network: net, - Interval: time.Second, + BlockState: st.Block, + GrandpaState: st.Grandpa, + Voters: voters, + Keypair: kr.Bob().(*ed25519.Keypair), + Network: net, + Interval: time.Second, } gs, err := NewService(cfg) From 
d2c42b8af79dc08d5807fe3cb36a521ec0002a33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Tue, 5 Jul 2022 15:45:53 -0400 Subject: [PATCH 34/48] feat(ci): update mockery from `2.10` to `2.14` (#2642) * feat: update mockery from 2.10 to 2.14 * chore: update devnet/ mocks --- .github/workflows/mocks.yml | 2 +- .../cmd/scale-down-ecs-service/mocks/ecsapi.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/block_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/block_finality_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/block_producer_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/core_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/network_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/rpcapi.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/runtime_storage_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/storage_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/sync_state_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/system_api.go | 17 ++++++++++++++++- dot/rpc/modules/mocks/transaction_state_api.go | 17 ++++++++++++++++- dot/state/mock_observer.go | 17 ++++++++++++++++- dot/sync/mocks/block_state.go | 17 ++++++++++++++++- lib/babe/mocks/block_import_handler.go | 17 ++++++++++++++++- lib/grandpa/mocks/network.go | 17 ++++++++++++++++- lib/runtime/mock_memory_test.go | 17 ++++++++++++++++- lib/runtime/mocks/instance.go | 17 ++++++++++++++++- lib/runtime/mocks/transaction_state.go | 17 ++++++++++++++++- lib/runtime/mocks/version.go | 17 ++++++++++++++++- lib/services/mocks/service.go | 17 ++++++++++++++++- 22 files changed, 337 insertions(+), 22 deletions(-) diff --git a/.github/workflows/mocks.yml b/.github/workflows/mocks.yml index f995c269f0..be8e92ce2a 100644 --- a/.github/workflows/mocks.yml +++ b/.github/workflows/mocks.yml @@ -21,7 +21,7 @@ jobs: stable: true check-latest: true - - run: go install github.com/vektra/mockery/v2@v2.10 + - run: go install github.com/vektra/mockery/v2@v2.14 - run: go install github.com/golang/mock/mockgen@v1.6 diff --git a/devnet/cmd/scale-down-ecs-service/mocks/ecsapi.go b/devnet/cmd/scale-down-ecs-service/mocks/ecsapi.go index c3dd446d7a..8922cbacd0 100644 --- a/devnet/cmd/scale-down-ecs-service/mocks/ecsapi.go +++ b/devnet/cmd/scale-down-ecs-service/mocks/ecsapi.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -4492,3 +4492,18 @@ func (_m *ECSAPI) WaitUntilTasksStoppedWithContext(_a0 context.Context, _a1 *ecs return r0 } + +type mockConstructorTestingTNewECSAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewECSAPI creates a new instance of ECSAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewECSAPI(t mockConstructorTestingTNewECSAPI) *ECSAPI { + mock := &ECSAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/block_api.go b/dot/rpc/modules/mocks/block_api.go index 7fa7736ddb..3486e2691a 100644 --- a/dot/rpc/modules/mocks/block_api.go +++ b/dot/rpc/modules/mocks/block_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -313,3 +313,18 @@ func (_m *BlockAPI) UnregisterRuntimeUpdatedChannel(id uint32) bool { return r0 } + +type mockConstructorTestingTNewBlockAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockAPI creates a new instance of BlockAPI.
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockAPI(t mockConstructorTestingTNewBlockAPI) *BlockAPI { + mock := &BlockAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/block_finality_api.go b/dot/rpc/modules/mocks/block_finality_api.go index 21117ca358..4f4746bf3e 100644 --- a/dot/rpc/modules/mocks/block_finality_api.go +++ b/dot/rpc/modules/mocks/block_finality_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -89,3 +89,18 @@ func (_m *BlockFinalityAPI) PreVotes() []ed25519.PublicKeyBytes { return r0 } + +type mockConstructorTestingTNewBlockFinalityAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockFinalityAPI creates a new instance of BlockFinalityAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockFinalityAPI(t mockConstructorTestingTNewBlockFinalityAPI) *BlockFinalityAPI { + mock := &BlockFinalityAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/block_producer_api.go b/dot/rpc/modules/mocks/block_producer_api.go index fd40639138..46172626b1 100644 --- a/dot/rpc/modules/mocks/block_producer_api.go +++ b/dot/rpc/modules/mocks/block_producer_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -64,3 +64,18 @@ func (_m *BlockProducerAPI) SlotDuration() uint64 { return r0 } + +type mockConstructorTestingTNewBlockProducerAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockProducerAPI creates a new instance of BlockProducerAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockProducerAPI(t mockConstructorTestingTNewBlockProducerAPI) *BlockProducerAPI { + mock := &BlockProducerAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/core_api.go b/dot/rpc/modules/mocks/core_api.go index 59105fbd01..5233ebade4 100644 --- a/dot/rpc/modules/mocks/core_api.go +++ b/dot/rpc/modules/mocks/core_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -199,3 +199,18 @@ func (_m *CoreAPI) QueryStorage(from common.Hash, to common.Hash, keys ...string return r0, r1 } + +type mockConstructorTestingTNewCoreAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewCoreAPI creates a new instance of CoreAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewCoreAPI(t mockConstructorTestingTNewCoreAPI) *CoreAPI { + mock := &CoreAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/network_api.go b/dot/rpc/modules/mocks/network_api.go index 76871f8469..52a90b48f8 100644 --- a/dot/rpc/modules/mocks/network_api.go +++ b/dot/rpc/modules/mocks/network_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. 
package mocks @@ -165,3 +165,18 @@ func (_m *NetworkAPI) Stop() error { return r0 } + +type mockConstructorTestingTNewNetworkAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewNetworkAPI creates a new instance of NetworkAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewNetworkAPI(t mockConstructorTestingTNewNetworkAPI) *NetworkAPI { + mock := &NetworkAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/rpcapi.go b/dot/rpc/modules/mocks/rpcapi.go index a65beb315e..bf73160143 100644 --- a/dot/rpc/modules/mocks/rpcapi.go +++ b/dot/rpc/modules/mocks/rpcapi.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -29,3 +29,18 @@ func (_m *RPCAPI) Methods() []string { return r0 } + +type mockConstructorTestingTNewRPCAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewRPCAPI creates a new instance of RPCAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewRPCAPI(t mockConstructorTestingTNewRPCAPI) *RPCAPI { + mock := &RPCAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/runtime_storage_api.go b/dot/rpc/modules/mocks/runtime_storage_api.go index 840c96d059..276a95b2c7 100644 --- a/dot/rpc/modules/mocks/runtime_storage_api.go +++ b/dot/rpc/modules/mocks/runtime_storage_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -82,3 +82,18 @@ func (_m *RuntimeStorageAPI) SetPersistent(k []byte, v []byte) error { return r0 } + +type mockConstructorTestingTNewRuntimeStorageAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewRuntimeStorageAPI creates a new instance of RuntimeStorageAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewRuntimeStorageAPI(t mockConstructorTestingTNewRuntimeStorageAPI) *RuntimeStorageAPI { + mock := &RuntimeStorageAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/storage_api.go b/dot/rpc/modules/mocks/storage_api.go index 8cb3fbe5bf..b1cff488fc 100644 --- a/dot/rpc/modules/mocks/storage_api.go +++ b/dot/rpc/modules/mocks/storage_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -186,3 +186,18 @@ func (_m *StorageAPI) RegisterStorageObserver(observer state.Observer) { func (_m *StorageAPI) UnregisterStorageObserver(observer state.Observer) { _m.Called(observer) } + +type mockConstructorTestingTNewStorageAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewStorageAPI creates a new instance of StorageAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewStorageAPI(t mockConstructorTestingTNewStorageAPI) *StorageAPI { + mock := &StorageAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/sync_state_api.go b/dot/rpc/modules/mocks/sync_state_api.go index 0e9e8c078b..471978c218 100644 --- a/dot/rpc/modules/mocks/sync_state_api.go +++ b/dot/rpc/modules/mocks/sync_state_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -34,3 +34,18 @@ func (_m *SyncStateAPI) GenSyncSpec(raw bool) (*genesis.Genesis, error) { return r0, r1 } + +type mockConstructorTestingTNewSyncStateAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewSyncStateAPI creates a new instance of SyncStateAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSyncStateAPI(t mockConstructorTestingTNewSyncStateAPI) *SyncStateAPI { + mock := &SyncStateAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/system_api.go b/dot/rpc/modules/mocks/system_api.go index 21889736fb..e656e30253 100644 --- a/dot/rpc/modules/mocks/system_api.go +++ b/dot/rpc/modules/mocks/system_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -80,3 +80,18 @@ func (_m *SystemAPI) SystemVersion() string { return r0 } + +type mockConstructorTestingTNewSystemAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewSystemAPI creates a new instance of SystemAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSystemAPI(t mockConstructorTestingTNewSystemAPI) *SystemAPI { + mock := &SystemAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/rpc/modules/mocks/transaction_state_api.go b/dot/rpc/modules/mocks/transaction_state_api.go index 82fc6e7553..1379fafa58 100644 --- a/dot/rpc/modules/mocks/transaction_state_api.go +++ b/dot/rpc/modules/mocks/transaction_state_api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -100,3 +100,18 @@ func (_m *TransactionStateAPI) Pop() *transaction.ValidTransaction { return r0 } + +type mockConstructorTestingTNewTransactionStateAPI interface { + mock.TestingT + Cleanup(func()) +} + +// NewTransactionStateAPI creates a new instance of TransactionStateAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewTransactionStateAPI(t mockConstructorTestingTNewTransactionStateAPI) *TransactionStateAPI { + mock := &TransactionStateAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/state/mock_observer.go b/dot/state/mock_observer.go index dfdb42adcc..e998b2d89c 100644 --- a/dot/state/mock_observer.go +++ b/dot/state/mock_observer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. 
package state @@ -43,3 +43,18 @@ func (_m *MockObserver) GetID() uint { func (_m *MockObserver) Update(result *SubscriptionResult) { _m.Called(result) } + +type mockConstructorTestingTNewMockObserver interface { + mock.TestingT + Cleanup(func()) +} + +// NewMockObserver creates a new instance of MockObserver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMockObserver(t mockConstructorTestingTNewMockObserver) *MockObserver { + mock := &MockObserver{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/dot/sync/mocks/block_state.go b/dot/sync/mocks/block_state.go index 2f9d48d9af..5307fbd0d0 100644 --- a/dot/sync/mocks/block_state.go +++ b/dot/sync/mocks/block_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -542,3 +542,18 @@ func (_m *BlockState) SubChain(start common.Hash, end common.Hash) ([]common.Has return r0, r1 } + +type mockConstructorTestingTNewBlockState interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockState creates a new instance of BlockState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockState(t mockConstructorTestingTNewBlockState) *BlockState { + mock := &BlockState{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lib/babe/mocks/block_import_handler.go b/lib/babe/mocks/block_import_handler.go index c4c61cf002..4627a3f898 100644 --- a/lib/babe/mocks/block_import_handler.go +++ b/lib/babe/mocks/block_import_handler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -26,3 +26,18 @@ func (_m *BlockImportHandler) HandleBlockProduced(block *types.Block, state *sto return r0 } + +type mockConstructorTestingTNewBlockImportHandler interface { + mock.TestingT + Cleanup(func()) +} + +// NewBlockImportHandler creates a new instance of BlockImportHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockImportHandler(t mockConstructorTestingTNewBlockImportHandler) *BlockImportHandler { + mock := &BlockImportHandler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lib/grandpa/mocks/network.go b/lib/grandpa/mocks/network.go index 5d7e607e03..1dc56facd9 100644 --- a/lib/grandpa/mocks/network.go +++ b/lib/grandpa/mocks/network.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -48,3 +48,18 @@ func (_m *Network) SendMessage(to peer.ID, msg network.NotificationsMessage) err return r0 } + +type mockConstructorTestingTNewNetwork interface { + mock.TestingT + Cleanup(func()) +} + +// NewNetwork creates a new instance of Network. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewNetwork(t mockConstructorTestingTNewNetwork) *Network { + mock := &Network{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lib/runtime/mock_memory_test.go b/lib/runtime/mock_memory_test.go index 9fca62f217..44305bb7d3 100644 --- a/lib/runtime/mock_memory_test.go +++ b/lib/runtime/mock_memory_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package runtime @@ -52,3 +52,18 @@ func (_m *mockMemory) Length() uint32 { return r0 } + +type mockConstructorTestingTnewMockMemory interface { + mock.TestingT + Cleanup(func()) +} + +// newMockMemory creates a new instance of mockMemory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func newMockMemory(t mockConstructorTestingTnewMockMemory) *mockMemory { + mock := &mockMemory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lib/runtime/mocks/instance.go b/lib/runtime/mocks/instance.go index c8f2353909..b7e9ecc2d7 100644 --- a/lib/runtime/mocks/instance.go +++ b/lib/runtime/mocks/instance.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -452,3 +452,18 @@ func (_m *Instance) Version() (runtime.Version, error) { return r0, r1 } + +type mockConstructorTestingTNewInstance interface { + mock.TestingT + Cleanup(func()) +} + +// NewInstance creates a new instance of Instance. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewInstance(t mockConstructorTestingTNewInstance) *Instance { + mock := &Instance{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lib/runtime/mocks/transaction_state.go b/lib/runtime/mocks/transaction_state.go index f7dd59036c..686cb78741 100644 --- a/lib/runtime/mocks/transaction_state.go +++ b/lib/runtime/mocks/transaction_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -29,3 +29,18 @@ func (_m *TransactionState) AddToPool(vt *transaction.ValidTransaction) common.H return r0 } + +type mockConstructorTestingTNewTransactionState interface { + mock.TestingT + Cleanup(func()) +} + +// NewTransactionState creates a new instance of TransactionState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewTransactionState(t mockConstructorTestingTNewTransactionState) *TransactionState { + mock := &TransactionState{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lib/runtime/mocks/version.go b/lib/runtime/mocks/version.go index 1999794717..264ceab55e 100644 --- a/lib/runtime/mocks/version.go +++ b/lib/runtime/mocks/version.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -138,3 +138,18 @@ func (_m *Version) TransactionVersion() uint32 { return r0 } + +type mockConstructorTestingTNewVersion interface { + mock.TestingT + Cleanup(func()) +} + +// NewVersion creates a new instance of Version. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewVersion(t mockConstructorTestingTNewVersion) *Version { + mock := &Version{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/lib/services/mocks/service.go b/lib/services/mocks/service.go index d6af8c0324..d2c6c71b15 100644 --- a/lib/services/mocks/service.go +++ b/lib/services/mocks/service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. +// Code generated by mockery v2.14.0. DO NOT EDIT. package mocks @@ -36,3 +36,18 @@ func (_m *Service) Stop() error { return r0 } + +type mockConstructorTestingTNewService interface { + mock.TestingT + Cleanup(func()) +} + +// NewService creates a new instance of Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewService(t mockConstructorTestingTNewService) *Service { + mock := &Service{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} From 7eede9ac3bd578a9464e5d34c027c7ab3cf862b2 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Tue, 5 Jul 2022 16:37:16 -0400 Subject: [PATCH 35/48] fix(tests): Fix wasmer flaky sorts (#2643) --- lib/runtime/wasmer/imports_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/runtime/wasmer/imports_test.go b/lib/runtime/wasmer/imports_test.go index 3e28ccd062..c173cccaf7 100644 --- a/lib/runtime/wasmer/imports_test.go +++ b/lib/runtime/wasmer/imports_test.go @@ -758,7 +758,9 @@ func Test_ext_crypto_ed25519_public_keys_version_1(t *testing.T) { copy(pubKeys[i][:], kp.Public().Encode()) } - sort.Slice(pubKeys, func(i int, j int) bool { return pubKeys[i][0] < pubKeys[j][0] }) + sort.Slice(pubKeys, func(i int, j int) bool { + return bytes.Compare(pubKeys[i][:], pubKeys[j][:]) < 0 + }) res, err := inst.Exec("rtm_ext_crypto_ed25519_public_keys_version_1", idData) require.NoError(t, err) @@ -771,7 +773,10 @@ func Test_ext_crypto_ed25519_public_keys_version_1(t *testing.T) { err = scale.Unmarshal(out, &ret) require.NoError(t, err) - sort.Slice(ret, func(i int, j int) bool { return ret[i][0] < ret[j][0] }) + sort.Slice(ret, func(i int, j int) bool { + return bytes.Compare(ret[i][:], ret[j][:]) < 0 + }) + require.Equal(t, pubKeys, ret) } From 3d920cf2160667548fe58f17196ad9a4a1291d50 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Wed, 6 Jul 2022 11:25:59 -0400 Subject: [PATCH 36/48] chore(runtime): remove life runtime (#2645) - it's not maintained, and adds maintenance/compilation complexity - `wazero` will be adopted instead --- .golangci.yml | 5 - cmd/gossamer/config.go | 3 - codecov.yml | 1 - dot/services.go | 18 - dot/services_test.go | 13 - go.mod | 4 - go.sum | 12 - lib/runtime/life/exports.go | 176 ---- lib/runtime/life/exports_test.go | 328 ------- lib/runtime/life/instance.go | 214 ----- lib/runtime/life/resolver.go | 1423 ----------------------------- lib/runtime/life/resolver_test.go | 1007 -------------------- lib/runtime/life/test_helpers.go | 54 -- 13 files changed, 3258 deletions(-) delete mode 100644 lib/runtime/life/exports.go delete mode 100644 lib/runtime/life/exports_test.go delete mode 100644 lib/runtime/life/instance.go delete mode 100644 lib/runtime/life/resolver.go delete mode 100644 lib/runtime/life/resolver_test.go delete mode 100644 lib/runtime/life/test_helpers.go diff --git a/.golangci.yml b/.golangci.yml index e4beaa4478..d0ff7d8ce5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -94,11 +94,6 @@ issues: - revive text: "package comment should be of the form" - - 
linters: - - revive - path: lib/runtime/life/ - text: "don't use underscores in Go names;" - - linters: - nolintlint source: "^//nolint:revive" diff --git a/cmd/gossamer/config.go b/cmd/gossamer/config.go index eaaa8dc0bd..228eda271e 100644 --- a/cmd/gossamer/config.go +++ b/cmd/gossamer/config.go @@ -20,7 +20,6 @@ import ( "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/genesis" - "github.com/ChainSafe/gossamer/lib/runtime/life" "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/ChainSafe/gossamer/lib/utils" "github.com/urfave/cli" @@ -622,8 +621,6 @@ func setDotCoreConfig(ctx *cli.Context, tomlCfg ctoml.CoreConfig, cfg *dot.CoreC switch tomlCfg.WasmInterpreter { case wasmer.Name: cfg.WasmInterpreter = wasmer.Name - case life.Name: - cfg.WasmInterpreter = life.Name case "": cfg.WasmInterpreter = gssmr.DefaultWasmInterpreter default: diff --git a/codecov.yml b/codecov.yml index dcf2d87281..12ed6125e8 100644 --- a/codecov.yml +++ b/codecov.yml @@ -38,7 +38,6 @@ coverage: - "dot/network" - "dot/peerset" - "dot/sync" - - "lib/runtime/life" - "lib/grandpa" - "lib/blocktree" 50pc: diff --git a/dot/services.go b/dot/services.go index 5ee1e7e939..a72d94e5aa 100644 --- a/dot/services.go +++ b/dot/services.go @@ -30,7 +30,6 @@ import ( "github.com/ChainSafe/gossamer/lib/grandpa" "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/runtime/life" "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/ChainSafe/gossamer/lib/utils" ) @@ -151,23 +150,6 @@ func createRuntime(cfg *Config, ns runtime.NodeStorage, st *state.Service, if err != nil { return nil, fmt.Errorf("failed to create runtime executor: %s", err) } - case life.Name: - rtCfg := &life.Config{ - Resolver: new(life.Resolver), - } - rtCfg.Storage = ts - rtCfg.Keystore = ks - rtCfg.LogLvl = cfg.Log.RuntimeLvl - rtCfg.NodeStorage = ns - rtCfg.Network = net - rtCfg.Role = cfg.Core.Roles - rtCfg.CodeHash = codeHash - - // create runtime executor - rt, err = life.NewInstance(code, rtCfg) - if err != nil { - return nil, fmt.Errorf("failed to create runtime executor: %s", err) - } default: return nil, fmt.Errorf("%w: %s", ErrWasmInterpreterName, cfg.Core.WasmInterpreter) } diff --git a/dot/services_test.go b/dot/services_test.go index a1e9feee38..4abe202c6c 100644 --- a/dot/services_test.go +++ b/dot/services_test.go @@ -21,7 +21,6 @@ import ( "github.com/ChainSafe/gossamer/lib/grandpa" "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/runtime/life" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/runtime/wasmer" "github.com/golang/mock/gomock" @@ -467,9 +466,6 @@ func Test_createRuntime(t *testing.T) { t.Parallel() cfg := NewTestConfig(t) - cfgLife := NewTestConfig(t) - cfgLife.Core.WasmInterpreter = life.Name - type args struct { cfg *Config ns runtime.NodeStorage @@ -489,15 +485,6 @@ func Test_createRuntime(t *testing.T) { expectedType: &wasmer.Instance{}, err: nil, }, - { - name: "wasmer life", - args: args{ - cfg: cfgLife, - ns: runtime.NodeStorage{}, - }, - expectedType: &life.Instance{}, - err: nil, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/go.mod b/go.mod index a674ad08ec..575da0b4d3 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,6 @@ require ( github.com/multiformats/go-multiaddr v0.6.0 
github.com/nanobox-io/golang-scribble v0.0.0-20190309225732-aa3e7c118975 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 - github.com/perlin-network/life v0.0.0-20191203030451-05c0e0f7eaea github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 github.com/qdm12/gotree v0.2.0 @@ -68,7 +67,6 @@ require ( github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/flynn/noise v1.0.0 // indirect - github.com/go-interpreter/wagon v0.6.0 // indirect github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect @@ -168,7 +166,6 @@ require ( github.com/tklauser/numcpus v0.2.2 // indirect github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce // indirect github.com/vedhavyas/go-subkey v1.0.2 // indirect - github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 // indirect github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect @@ -179,7 +176,6 @@ require ( golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect - google.golang.org/appengine v1.6.6 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.0.3 // indirect diff --git a/go.sum b/go.sum index da6ad2aeb4..2cb81c7b8b 100644 --- a/go.sum +++ b/go.sum @@ -253,7 +253,6 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -297,8 +296,6 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-interpreter/wagon v0.6.0 h1:BBxDxjiJiHgw9EdkYXAWs8NHhwnazZ5P2EWBW5hFNWw= -github.com/go-interpreter/wagon v0.6.0/go.mod h1:5+b/MBYkclRZngKF5s6qrgWxSLgE9F5dFdO1hAueZLc= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -1079,8 +1076,6 @@ github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7ir github.com/pborman/uuid 
v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/perlin-network/life v0.0.0-20191203030451-05c0e0f7eaea h1:okKoivlkNRRLqXraEtatHfEhW+D71QTwkaj+4n4M2Xc= -github.com/perlin-network/life v0.0.0-20191203030451-05c0e0f7eaea/go.mod h1:3KEU5Dm8MAYWZqity880wOFJ9PhQjyKVZGwAEfc5Q4E= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= @@ -1257,8 +1252,6 @@ github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZF github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc h1:RTUQlKzoZZVG3umWNzOYeFecQLIh+dbxXvJp1zPQJTI= -github.com/twitchyliquid64/golang-asm v0.0.0-20190126203739-365674df15fc/go.mod h1:NoCfSFWosfqMqmmD7hApkirIK9ozpHjxRnRxs1l413A= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -1273,8 +1266,6 @@ github.com/vedhavyas/go-subkey v1.0.2 h1:EW6U+1us4k38AtrBfFOEZTpW9FcF/cIUOxw/pHb github.com/vedhavyas/go-subkey v1.0.2/go.mod h1:T9SEs84XZxRULMZLWtIl48s9rBNE7h6GnkqTgJR8+MU= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/wasmerio/go-ext-wasm v0.3.2-0.20200326095750-0a32be6068ec h1:VElCeVyfCWNmCv6UisKQrr+P2/JRG0uf4/FIdCB4pL0= @@ -1506,7 +1497,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190306220234-b354f8bf4d9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1695,10 +1685,8 @@ google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= diff --git a/lib/runtime/life/exports.go b/lib/runtime/life/exports.go deleted file mode 100644 index f91d161a42..0000000000 --- a/lib/runtime/life/exports.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package life - -import ( - "errors" - "fmt" - "strings" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/transaction" - "github.com/ChainSafe/gossamer/pkg/scale" -) - -// ValidateTransaction runs the extrinsic through runtime function -// TaggedTransactionQueue_validate_transaction and returns *Validity -func (in *Instance) ValidateTransaction(e types.Extrinsic) (*transaction.Validity, error) { - ret, err := in.Exec(runtime.TaggedTransactionQueueValidateTransaction, e) - if err != nil { - return nil, err - } - - if ret[0] != 0 { - return nil, runtime.NewValidateTransactionError(ret) - } - - v := transaction.NewValidity(0, [][]byte{{}}, [][]byte{{}}, 0, false) - err = scale.Unmarshal(ret[1:], v) - return v, err -} - -// Version calls runtime function Core_Version -func (in *Instance) Version() (runtime.Version, error) { - res, err := in.Exec(runtime.CoreVersion, []byte{}) - if err != nil { - return nil, err - } - - version := &runtime.VersionData{} - err = version.Decode(res) - // error comes from scale now, so do a string check - if err != nil { - if strings.Contains(err.Error(), "EOF") { - // kusama seems to use the legacy version format - lversion := &runtime.LegacyVersionData{} - err = lversion.Decode(res) - return lversion, err - } - return nil, err - } - - return version, nil -} - -// Metadata calls runtime function Metadata_metadata -func (in *Instance) Metadata() ([]byte, error) { - return in.Exec(runtime.Metadata, []byte{}) -} - -// BabeConfiguration gets the configuration data for BABE from the runtime -func (in *Instance) BabeConfiguration() (*types.BabeConfiguration, error) { - data, err := in.Exec(runtime.BabeAPIConfiguration, []byte{}) - if err != nil { - return nil, err - } - - bc := new(types.BabeConfiguration) - err = scale.Unmarshal(data, bc) - if err != nil { - return nil, err - } - - return bc, nil -} - -// GrandpaAuthorities returns the genesis authorities from the runtime -func (in *Instance) GrandpaAuthorities() ([]types.Authority, error) { - ret, err := in.Exec(runtime.GrandpaAuthorities, 
[]byte{}) - if err != nil { - return nil, err - } - - var gar []types.GrandpaAuthoritiesRaw - err = scale.Unmarshal(ret, &gar) - if err != nil { - return nil, err - } - - return types.GrandpaAuthoritiesRawToAuthorities(gar) -} - -// InitializeBlock calls runtime API function Core_initialise_block -func (in *Instance) InitializeBlock(header *types.Header) error { - encodedHeader, err := scale.Marshal(*header) - if err != nil { - return fmt.Errorf("cannot encode header: %w", err) - } - - _, err = in.Exec(runtime.CoreInitializeBlock, encodedHeader) - return err -} - -// InherentExtrinsics calls runtime API function BlockBuilder_inherent_extrinsics -func (in *Instance) InherentExtrinsics(data []byte) ([]byte, error) { - return in.Exec(runtime.BlockBuilderInherentExtrinsics, data) -} - -// ApplyExtrinsic calls runtime API function BlockBuilder_apply_extrinsic -func (in *Instance) ApplyExtrinsic(data types.Extrinsic) ([]byte, error) { - return in.Exec(runtime.BlockBuilderApplyExtrinsic, data) -} - -// FinalizeBlock calls runtime API function BlockBuilder_finalize_block -func (in *Instance) FinalizeBlock() (*types.Header, error) { - data, err := in.Exec(runtime.BlockBuilderFinalizeBlock, []byte{}) - if err != nil { - return nil, err - } - - bh := types.NewEmptyHeader() - err = scale.Unmarshal(data, bh) - if err != nil { - return nil, err - } - - return bh, nil -} - -// ExecuteBlock calls runtime function Core_execute_block -func (in *Instance) ExecuteBlock(block *types.Block) ([]byte, error) { - // copy block since we're going to modify it - b, err := block.DeepCopy() - if err != nil { - return nil, err - } - - b.Header.Digest = types.NewDigest() - - // remove seal digest only - for _, d := range block.Header.Digest.Types { - switch d.Value().(type) { - case types.SealDigest: - continue - default: - err = b.Header.Digest.Add(d.Value()) - if err != nil { - return nil, err - } - } - } - - bdEnc, err := b.Encode() - if err != nil { - return nil, err - } - - return in.Exec(runtime.CoreExecuteBlock, bdEnc) -} - -// DecodeSessionKeys decodes the given public session keys. Returns a list of raw public keys including their key type. 
-func (in *Instance) DecodeSessionKeys(enc []byte) ([]byte, error) { - return in.Exec(runtime.DecodeSessionKeys, enc) -} - -// PaymentQueryInfo returns information of a given extrinsic -func (*Instance) PaymentQueryInfo([]byte) (*types.TransactionPaymentQueryInfo, error) { - // TODO: implement the payment query info (see issue #1892) - return nil, errors.New("not implemented yet") -} - -func (in *Instance) CheckInherents() {} //nolint:revive -func (in *Instance) RandomSeed() {} //nolint:revive -func (in *Instance) OffchainWorker() {} //nolint:revive -func (in *Instance) GenerateSessionKeys() {} //nolint:revive diff --git a/lib/runtime/life/exports_test.go b/lib/runtime/life/exports_test.go deleted file mode 100644 index d8cfe1e03b..0000000000 --- a/lib/runtime/life/exports_test.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package life - -import ( - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/genesis" - "github.com/ChainSafe/gossamer/lib/keystore" - "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/trie" - "github.com/ChainSafe/gossamer/lib/utils" - "github.com/ChainSafe/gossamer/pkg/scale" - "github.com/stretchr/testify/require" -) - -func newInstanceFromGenesis(t *testing.T) runtime.Instance { - genesisPath := utils.GetGssmrGenesisRawPathTest(t) - gen, err := genesis.NewGenesisFromJSONRaw(genesisPath) - require.NoError(t, err) - - genTrie, err := genesis.NewTrieFromGenesis(gen) - require.NoError(t, err) - - // set state to genesis state - genState, err := storage.NewTrieState(genTrie) - require.NoError(t, err) - - cfg := &Config{} - cfg.Storage = genState - cfg.LogLvl = 4 - - instance, err := NewRuntimeFromGenesis(cfg) - require.NoError(t, err) - return instance -} - -func TestInstance_Version_NodeRuntime(t *testing.T) { - expected := runtime.NewVersionData( - []byte("node"), - []byte("substrate-node"), - 10, - 264, - 0, - nil, - 2, - ) - - instance := newInstanceFromGenesis(t) - - version, err := instance.Version() - require.NoError(t, err) - - t.Logf("SpecName: %s\n", version.SpecName()) - t.Logf("ImplName: %s\n", version.ImplName()) - t.Logf("AuthoringVersion: %d\n", version.AuthoringVersion()) - t.Logf("SpecVersion: %d\n", version.SpecVersion()) - t.Logf("ImplVersion: %d\n", version.ImplVersion()) - t.Logf("TransactionVersion: %d\n", version.TransactionVersion()) - - require.Equal(t, 13, len(version.APIItems())) - require.Equal(t, expected.SpecName(), version.SpecName()) - require.Equal(t, expected.ImplName(), version.ImplName()) - require.Equal(t, expected.AuthoringVersion(), version.AuthoringVersion()) - require.Equal(t, expected.SpecVersion(), version.SpecVersion()) - require.Equal(t, expected.ImplVersion(), version.ImplVersion()) - require.Equal(t, expected.TransactionVersion(), version.TransactionVersion()) -} - -func TestInstance_BabeConfiguration_NodeRuntime_WithAuthorities(t *testing.T) { - instance := newInstanceFromGenesis(t) - cfg, err := instance.BabeConfiguration() - require.NoError(t, err) - - kr, _ := keystore.NewSr25519Keyring() - - expectedAuthData := []types.AuthorityRaw{} - - for _, kp := range kr.Keys { - kb := [32]byte{} - copy(kb[:], kp.Public().Encode()) - expectedAuthData = append(expectedAuthData, types.AuthorityRaw{ - Key: kb, - Weight: 1, - }) - } - - expected := &types.BabeConfiguration{ - 
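	// Expected values for the gssmr test genesis: 3000 ms slots, 200-slot
	// epochs, and a slot-lottery fraction of c = C1/C2 = 1/2.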
SlotDuration: 3000, - EpochLength: 200, - C1: 1, - C2: 2, - GenesisAuthorities: expectedAuthData, - Randomness: [32]byte{}, - SecondarySlots: 1, - } - - require.Equal(t, expected, cfg) -} - -func TestInstance_GrandpaAuthorities_NodeRuntime(t *testing.T) { - instance := newInstanceFromGenesis(t) - auths, err := instance.GrandpaAuthorities() - require.NoError(t, err) - - kr, _ := keystore.NewEd25519Keyring() - - t.Logf("%x", kr.Alice().Public()) - t.Logf("%x", auths[0].Key) - - var expected []types.Authority - - for _, kp := range kr.Keys { - expected = append(expected, types.Authority{ - Key: kp.Public(), - Weight: 1, - }) - } - - require.Equal(t, expected, auths) -} - -func buildBlock(t *testing.T, instance runtime.Instance) *types.Block { - header := &types.Header{ - ParentHash: trie.EmptyHash, - Number: 1, - Digest: types.NewDigest(), - } - - err := instance.InitializeBlock(header) - require.NoError(t, err) - - idata := types.NewInherentsData() - err = idata.SetInt64Inherent(types.Timstap0, uint64(time.Now().Unix())) - require.NoError(t, err) - - err = idata.SetInt64Inherent(types.Babeslot, 1) - require.NoError(t, err) - - ienc, err := idata.Encode() - require.NoError(t, err) - - // Call BlockBuilder_inherent_extrinsics which returns the inherents as extrinsics - inherentExts, err := instance.InherentExtrinsics(ienc) - require.NoError(t, err) - - //// decode inherent extrinsics - var exts [][]byte - err = scale.Unmarshal(inherentExts, &exts) - require.NoError(t, err) - - // apply each inherent extrinsic - for _, ext := range exts { - in, err := scale.Marshal(ext) - require.NoError(t, err) - - ret, err := instance.ApplyExtrinsic(append([]byte{1}, in...)) - require.NoError(t, err, in) - require.Equal(t, ret, []byte{0, 0}) - } - - res, err := instance.FinalizeBlock() - require.NoError(t, err) - - res.Number = header.Number - - babeDigest := types.NewBabeDigest() - err = babeDigest.Set(*types.NewBabePrimaryPreDigest(0, 1, [32]byte{}, [64]byte{})) - require.NoError(t, err) - data, err := scale.Marshal(babeDigest) - require.NoError(t, err) - preDigest := types.NewBABEPreRuntimeDigest(data) - - digest := types.NewDigest() - err = digest.Add(*preDigest) - require.NoError(t, err) - res.Digest = digest - - expected := &types.Header{ - ParentHash: header.ParentHash, - Number: 1, - Digest: digest, - } - - require.Equal(t, expected.ParentHash, res.ParentHash) - require.Equal(t, expected.Number, res.Number) - require.Equal(t, expected.Digest, res.Digest) - require.False(t, res.StateRoot.IsEmpty()) - require.False(t, res.ExtrinsicsRoot.IsEmpty()) - require.NotEqual(t, trie.EmptyHash, res.StateRoot) - - return &types.Block{ - Header: *res, - Body: *types.NewBody(types.BytesArrayToExtrinsics(exts)), - } -} - -func TestInstance_FinalizeBlock_NodeRuntime(t *testing.T) { - instance := newInstanceFromGenesis(t) - buildBlock(t, instance) -} - -func TestInstance_ExecuteBlock_GossamerRuntime(t *testing.T) { - t.Skip("Broken due to outdated runtime") - - instance := newInstanceFromGenesis(t) - block := buildBlock(t, instance) - - // reset state back to parent state before executing - genesisPath := utils.GetGssmrGenesisRawPathTest(t) - gen, err := genesis.NewGenesisFromJSONRaw(genesisPath) - require.NoError(t, err) - genTrie, err := genesis.NewTrieFromGenesis(gen) - require.NoError(t, err) - parentState, err := storage.NewTrieState(genTrie) - require.NoError(t, err) - instance.SetContextStorage(parentState) - - _, err = instance.ExecuteBlock(block) - require.NoError(t, err) -} - -func 
TestInstance_ExecuteBlock_KusamaRuntime_KusamaBlock1(t *testing.T) { - genesisPath := utils.GetKusamaGenesisPath(t) - gen, err := genesis.NewGenesisFromJSONRaw(genesisPath) - require.NoError(t, err) - - genTrie, err := genesis.NewTrieFromGenesis(gen) - require.NoError(t, err) - - expectedGenesisRoot := common.MustHexToHash("0xb0006203c3a6e6bd2c6a17b1d4ae8ca49a31da0f4579da950b127774b44aef6b") - require.Equal(t, expectedGenesisRoot, genTrie.MustHash()) - - // set state to genesis state - genState, err := storage.NewTrieState(genTrie) - require.NoError(t, err) - - cfg := &Config{} - cfg.Storage = genState - cfg.LogLvl = 4 - - instance, err := NewRuntimeFromGenesis(cfg) - require.NoError(t, err) - - // block data is received from querying a polkadot node - body := []byte{8, 40, 4, 2, 0, 11, 144, 17, 14, 179, 110, 1, 16, 4, 20, 0, 0} - var exts [][]byte - err = scale.Unmarshal(body, &exts) - require.NoError(t, err) - require.Equal(t, 2, len(exts)) - - // digest from polkadot.js - digestBytes := common.MustHexToBytes("0x0c0642414245340201000000ef55a50f00000000044241424549040118ca239392960473fe1bc65f94ee27d890a49c1b200c006ff5dcc525330ecc16770100000000000000b46f01874ce7abbb5220e8fd89bede0adad14c73039d91e28e881823433e723f0100000000000000d684d9176d6eb69887540c9a89fa6097adea82fc4b0ff26d1062b488f352e179010000000000000068195a71bdde49117a616424bdc60a1733e96acb1da5aeab5d268cf2a572e94101000000000000001a0575ef4ae24bdfd31f4cb5bd61239ae67c12d4e64ae51ac756044aa6ad8200010000000000000018168f2aad0081a25728961ee00627cfe35e39833c805016632bf7c14da5800901000000000000000000000000000000000000000000000000000000000000000000000000000000054241424501014625284883e564bc1e4063f5ea2b49846cdddaa3761d04f543b698c1c3ee935c40d25b869247c36c6b8a8cbbd7bb2768f560ab7c276df3c62df357a7e3b1ec8d") //nolint:lll - digest := types.NewDigest() - err = scale.Unmarshal(digestBytes, &digest) - require.NoError(t, err) - - // kusama block 1, from polkadot.js - block := &types.Block{ - Header: types.Header{ - ParentHash: common.MustHexToHash("0xb0a8d493285c2df73290dfb7e61f870f17b41801197a149ca93654499ea3dafe"), - Number: 1, - StateRoot: common.MustHexToHash("0xfabb0c6e92d29e8bb2167f3c6fb0ddeb956a4278a3cf853661af74a076fc9cb7"), - ExtrinsicsRoot: common.MustHexToHash("0xa35fb7f7616f5c979d48222b3d2fa7cb2331ef73954726714d91ca945cc34fd8"), - Digest: digest, - }, - Body: *types.NewBody(types.BytesArrayToExtrinsics(exts)), - } - - _, err = instance.ExecuteBlock(block) - require.NoError(t, err) -} - -func TestInstance_ExecuteBlock_PolkadotRuntime_PolkadotBlock1(t *testing.T) { - genesisPath := utils.GetPolkadotGenesisPath(t) - gen, err := genesis.NewGenesisFromJSONRaw(genesisPath) - require.NoError(t, err) - - genTrie, err := genesis.NewTrieFromGenesis(gen) - require.NoError(t, err) - - expectedGenesisRoot := common.MustHexToHash("0x29d0d972cd27cbc511e9589fcb7a4506d5eb6a9e8df205f00472e5ab354a4e17") - require.Equal(t, expectedGenesisRoot, genTrie.MustHash()) - - // set state to genesis state - genState, err := storage.NewTrieState(genTrie) - require.NoError(t, err) - - cfg := &Config{} - cfg.Storage = genState - cfg.LogLvl = 5 - - instance, err := NewRuntimeFromGenesis(cfg) - require.NoError(t, err) - - // block data is received from querying a polkadot node - body := []byte{8, 40, 4, 3, 0, 11, 80, 149, 160, 81, 114, 1, 16, 4, 20, 0, 0} - var exts [][]byte - err = scale.Unmarshal(body, &exts) - require.NoError(t, err) - require.Equal(t, 2, len(exts)) - - // digest data received from querying polkadot node - digestBytes := 
common.MustHexToBytes("0x0c0642414245b501010000000093decc0f00000000362ed8d6055645487fe42e9c8640be651f70a3a2a03658046b2b43f021665704501af9b1ca6e974c257e3d26609b5f68b5b0a1da53f7f252bbe5d94948c39705c98ffa4b869dd44ac29528e3723d619cc7edf1d3f7b7a57a957f6a7e9bdb270a044241424549040118fa3437b10f6e7af8f31362df3a179b991a8c56313d1bcd6307a4d0c734c1ae310100000000000000d2419bc8835493ac89eb09d5985281f5dff4bc6c7a7ea988fd23af05f301580a0100000000000000ccb6bef60defc30724545d57440394ed1c71ea7ee6d880ed0e79871a05b5e40601000000000000005e67b64cf07d4d258a47df63835121423551712844f5b67de68e36bb9a21e12701000000000000006236877b05370265640c133fec07e64d7ca823db1dc56f2d3584b3d7c0f1615801000000000000006c52d02d95c30aa567fda284acf25025ca7470f0b0c516ddf94475a1807c4d250100000000000000000000000000000000000000000000000000000000000000000000000000000005424142450101d468680c844b19194d4dfbdc6697a35bf2b494bda2c5a6961d4d4eacfbf74574379ba0d97b5bb650c2e8670a63791a727943bcb699dc7a228bdb9e0a98c9d089") //nolint:lll - digest := types.NewDigest() - err = scale.Unmarshal(digestBytes, &digest) - require.NoError(t, err) - - // polkadot block 1, from polkadot.js - block := &types.Block{ - Header: types.Header{ - ParentHash: common.MustHexToHash("0x91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3"), - Number: 1, - StateRoot: common.MustHexToHash("0xc56fcd6e7a757926ace3e1ecff9b4010fc78b90d459202a339266a7f6360002f"), - ExtrinsicsRoot: common.MustHexToHash("0x9a87f6af64ef97aff2d31bebfdd59f8fe2ef6019278b634b2515a38f1c4c2420"), - Digest: digest, - }, - Body: *types.NewBody(types.BytesArrayToExtrinsics(exts)), - } - - _, err = instance.ExecuteBlock(block) - require.NoError(t, err) -} diff --git a/lib/runtime/life/instance.go b/lib/runtime/life/instance.go deleted file mode 100644 index 1611f04b3c..0000000000 --- a/lib/runtime/life/instance.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package life - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/ChainSafe/gossamer/internal/log" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/crypto" - "github.com/ChainSafe/gossamer/lib/keystore" - "github.com/ChainSafe/gossamer/lib/runtime" - - "github.com/perlin-network/life/exec" - wasm_validation "github.com/perlin-network/life/wasm-validation" -) - -// Name represents the name of the interpreter -const Name = "life" - -// Check that runtime interfaces are satisfied -var ( - _ runtime.Instance = (*Instance)(nil) - logger = log.NewFromGlobal( - log.AddContext("pkg", "runtime"), - log.AddContext("component", "perlin/life"), - ) - ctx *runtime.Context -) - -// Config represents a life configuration -type Config struct { - runtime.InstanceConfig - Resolver exec.ImportResolver -} - -// Instance is a runtime life instance -type Instance struct { - vm *exec.VirtualMachine - mu sync.Mutex -} - -// GetCodeHash returns code hash of the runtime -func (*Instance) GetCodeHash() common.Hash { - return common.Hash{} -} - -// NewRuntimeFromGenesis creates a runtime instance from the genesis data -func NewRuntimeFromGenesis(cfg *Config) (runtime.Instance, error) { - if cfg.Storage == nil { - return nil, errors.New("storage is nil") - } - - code := cfg.Storage.LoadCode() - if len(code) == 0 { - return nil, fmt.Errorf("cannot find :code in state") - } - - cfg.Resolver = new(Resolver) - return NewInstance(code, cfg) -} - -// NewInstanceFromFile instantiates a runtime from a .wasm file -func NewInstanceFromFile(fp string, cfg 
*Config) (*Instance, error) { - // Reads the WebAssembly module as bytes. - bytes, err := os.ReadFile(filepath.Clean(fp)) - if err != nil { - return nil, err - } - - if err = wasm_validation.ValidateWasm(bytes); err != nil { - return nil, err - } - - return NewInstance(bytes, cfg) -} - -// NewInstance ... -func NewInstance(code []byte, cfg *Config) (*Instance, error) { - if len(code) == 0 { - return nil, errors.New("code is empty") - } - - logger.Patch(log.SetLevel(cfg.LogLvl)) - - vmCfg := exec.VMConfig{ - DefaultMemoryPages: 23, - } - - instance, err := exec.NewVirtualMachine(code, vmCfg, cfg.Resolver, nil) - if err != nil { - return nil, err - } - - memory := &Memory{ - memory: instance.Memory, - } - - // TODO: use __heap_base (#1874) - allocator := runtime.NewAllocator(memory, 0) - - runtimeCtx := &runtime.Context{ - Storage: cfg.Storage, - Allocator: allocator, - Keystore: cfg.Keystore, - Validator: cfg.Role == byte(4), - NodeStorage: cfg.NodeStorage, - Network: cfg.Network, - Transaction: cfg.Transaction, - SigVerifier: crypto.NewSignatureVerifier(logger), - } - - logger.Debugf("creating new runtime instance with context: %v", runtimeCtx) - - inst := &Instance{ - vm: instance, - } - - ctx = runtimeCtx - return inst, nil -} - -// Memory is a thin wrapper around life's memory to support -// Gossamer runtime.Memory interface -type Memory struct { - memory []byte -} - -// Data returns the memory's data -func (m *Memory) Data() []byte { - return m.memory -} - -// Length returns the memory's length -func (m *Memory) Length() uint32 { - return uint32(len(m.memory)) -} - -// Grow ... -func (m *Memory) Grow(numPages uint32) error { - m.memory = append(m.memory, make([]byte, runtime.PageSize*numPages)...) - return nil -} - -// UpdateRuntimeCode ... -func (*Instance) UpdateRuntimeCode(_ []byte) error { - return errors.New("unimplemented") -} - -// CheckRuntimeVersion ... -func (*Instance) CheckRuntimeVersion(_ []byte) (runtime.Version, error) { - return nil, errors.New("unimplemented") -} - -// SetContextStorage sets the runtime's storage. It should be set before calls to the below functions. -func (*Instance) SetContextStorage(s runtime.Storage) { - ctx.Storage = s -} - -// Exec calls the given function with the given data -func (in *Instance) Exec(function string, data []byte) ([]byte, error) { - in.mu.Lock() - defer in.mu.Unlock() - - ptr, err := ctx.Allocator.Allocate(uint32(len(data))) - if err != nil { - return nil, err - } - defer ctx.Allocator.Clear() - - copy(in.vm.Memory[ptr:ptr+uint32(len(data))], data) - - fnc, ok := in.vm.GetFunctionExport(function) - if !ok { - return nil, fmt.Errorf("could not find exported function %s", function) - } - - ret, err := in.vm.Run(fnc, int64(ptr), int64(len(data))) - if err != nil { - fmt.Println(in.vm.StackTrace) - return nil, err - } - - offset, length := runtime.Int64ToPointerAndSize(ret) - return in.vm.Memory[offset : offset+length], nil -} - -// Stop ... 
-func (*Instance) Stop() {} - -// NodeStorage to get reference to runtime node service -func (*Instance) NodeStorage() runtime.NodeStorage { - return ctx.NodeStorage -} - -// NetworkService to get referernce to runtime network service -func (*Instance) NetworkService() runtime.BasicNetwork { - return ctx.Network -} - -// Validator returns the context's Validator -func (*Instance) Validator() bool { - return ctx.Validator -} - -// Keystore to get reference to runtime keystore -func (*Instance) Keystore() *keystore.GlobalKeystore { - return ctx.Keystore -} diff --git a/lib/runtime/life/resolver.go b/lib/runtime/life/resolver.go deleted file mode 100644 index 21be163dc4..0000000000 --- a/lib/runtime/life/resolver.go +++ /dev/null @@ -1,1423 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package life - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math/big" - - "github.com/ChainSafe/gossamer/lib/common" - rtype "github.com/ChainSafe/gossamer/lib/common/types" - "github.com/ChainSafe/gossamer/lib/crypto" - "github.com/ChainSafe/gossamer/lib/crypto/ed25519" - "github.com/ChainSafe/gossamer/lib/crypto/secp256k1" - "github.com/ChainSafe/gossamer/lib/crypto/sr25519" - "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/trie" - "github.com/ChainSafe/gossamer/pkg/scale" - "github.com/perlin-network/life/exec" -) - -// Resolver resolves the imports for life -type Resolver struct{} // TODO: move context inside resolver (#1875) - -// ResolveFunc ... -func (*Resolver) ResolveFunc(module, field string) exec.FunctionImport { //nolint:gocyclo - switch module { - case "env": - switch field { - case "ext_logging_log_version_1": - return ext_logging_log_version_1 - case "ext_misc_print_utf8_version_1": - return ext_misc_print_utf8_version_1 - case "ext_misc_print_hex_version_1": - return ext_misc_print_hex_version_1 - case "ext_allocator_malloc_version_1": - return ext_allocator_malloc_version_1 - case "ext_allocator_free_version_1": - return ext_allocator_free_version_1 - case "ext_hashing_blake2_256_version_1": - return ext_hashing_blake2_256_version_1 - case "ext_hashing_twox_128_version_1": - return ext_hashing_twox_128_version_1 - case "ext_storage_get_version_1": - return ext_storage_get_version_1 - case "ext_storage_set_version_1": - return ext_storage_set_version_1 - case "ext_storage_next_key_version_1": - return ext_storage_next_key_version_1 - case "ext_hashing_twox_64_version_1": - return ext_hashing_twox_64_version_1 - case "ext_storage_clear_version_1": - return ext_storage_clear_version_1 - case "ext_storage_clear_prefix_version_1": - return ext_storage_clear_prefix_version_1 - case "ext_storage_read_version_1": - return ext_storage_read_version_1 - case "ext_storage_append_version_1": - return ext_storage_append_version_1 - case "ext_trie_blake2_256_ordered_root_version_1": - return ext_trie_blake2_256_ordered_root_version_1 - case "ext_storage_root_version_1": - return ext_storage_root_version_1 - case "ext_storage_changes_root_version_1": - return ext_storage_changes_root_version_1 - case "ext_crypto_start_batch_verify_version_1": - return ext_crypto_start_batch_verify_version_1 - case "ext_crypto_finish_batch_verify_version_1": - return ext_crypto_finish_batch_verify_version_1 - case "ext_offchain_index_set_version_1": - return ext_offchain_index_set_version_1 - case "ext_storage_exists_version_1": - return ext_storage_exists_version_1 - case "ext_default_child_storage_set_version_1": - return 
ext_default_child_storage_set_version_1 - case "ext_default_child_storage_get_version_1": - return ext_default_child_storage_get_version_1 - case "ext_default_child_storage_read_version_1": - return ext_default_child_storage_read_version_1 - case "ext_default_child_storage_clear_version_1": - return ext_default_child_storage_clear_version_1 - case "ext_default_child_storage_storage_kill_version_1": - return ext_default_child_storage_storage_kill_version_1 - case "ext_default_child_storage_exists_version_1": - return ext_default_child_storage_exists_version_1 - case "ext_default_child_storage_clear_prefix_version_1": - return ext_default_child_storage_clear_prefix_version_1 - case "ext_default_child_storage_root_version_1": - return ext_default_child_storage_root_version_1 - case "ext_default_child_storage_next_key_version_1": - return ext_default_child_storage_next_key_version_1 - case "ext_crypto_ed25519_public_keys_version_1": - return ext_crypto_ed25519_public_keys_version_1 - case "ext_crypto_ed25519_generate_version_1": - return ext_crypto_ed25519_generate_version_1 - case "ext_crypto_ed25519_sign_version_1": - return ext_crypto_ed25519_sign_version_1 - case "ext_crypto_ed25519_verify_version_1": - return ext_crypto_ed25519_verify_version_1 - case "ext_crypto_sr25519_public_keys_version_1": - return ext_crypto_sr25519_public_keys_version_1 - case "ext_crypto_sr25519_generate_version_1": - return ext_crypto_sr25519_generate_version_1 - case "ext_crypto_sr25519_sign_version_1": - return ext_crypto_sr25519_sign_version_1 - case "ext_crypto_sr25519_verify_version_1": - return ext_crypto_sr25519_verify_version_1 - case "ext_crypto_secp256k1_ecdsa_recover_version_1": - return ext_crypto_secp256k1_ecdsa_recover_version_1 - case "ext_hashing_keccak_256_version_1": - return ext_hashing_keccak_256_version_1 - case "ext_hashing_sha2_256_version_1": - return ext_hashing_sha2_256_version_1 - case "ext_hashing_blake2_128_version_1": - return ext_hashing_blake2_128_version_1 - case "ext_hashing_twox_256_version_1": - return ext_hashing_twox_256_version_1 - case "ext_trie_blake2_256_root_version_1": - return ext_trie_blake2_256_root_version_1 - default: - panic(fmt.Errorf("unknown import resolved: %s", field)) - } - default: - panic(fmt.Errorf("unknown module: %s", module)) - } -} - -// ResolveGlobal ... 
-func (*Resolver) ResolveGlobal(_, _ string) int64 { - panic("we're not resolving global variables for now") -} - -func ext_logging_log_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - level := int32(vm.GetCurrentFrame().Locals[0]) - targetData := vm.GetCurrentFrame().Locals[1] - msgData := vm.GetCurrentFrame().Locals[2] - - target := asMemorySlice(vm.Memory, targetData) - msg := asMemorySlice(vm.Memory, msgData) - - switch int(level) { - case 0: - logger.Criticalf("target=%s message=%s", string(target), string(msg)) - case 1: - logger.Warnf("target=%s message=%s", string(target), string(msg)) - case 2: - logger.Infof("target=%s message=%s", string(target), string(msg)) - case 3: - logger.Debugf("target=%s message=%s", string(target), string(msg)) - case 4: - logger.Tracef("target=%s message=%s", string(target), string(msg)) - default: - logger.Errorf("level=%d target=%s message=%s", level, string(target), string(msg)) - } - - return 0 -} - -func ext_misc_print_utf8_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - dataSpan := vm.GetCurrentFrame().Locals[0] - data := asMemorySlice(vm.Memory, dataSpan) - logger.Debugf("utf8 data: 0x%x", data) - return 0 -} - -func ext_misc_print_hex_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - dataSpan := vm.GetCurrentFrame().Locals[0] - data := asMemorySlice(vm.Memory, dataSpan) - logger.Debugf("data is: 0x%x", data) - return 0 -} - -func ext_allocator_malloc_version_1(vm *exec.VirtualMachine) int64 { - size := uint32(vm.GetCurrentFrame().Locals[0]) - logger.Tracef("executing with size %d...", size) - - // Allocate memory - res, err := ctx.Allocator.Allocate(size) - if err != nil { - logger.Errorf("[ext_allocator_malloc_version_1]: %s", err) - panic(err) - } - - return int64(res) -} - -func ext_allocator_free_version_1(vm *exec.VirtualMachine) int64 { - addr := uint32(vm.GetCurrentFrame().Locals[0]) - logger.Tracef("executing at address %d...", addr) - - // Deallocate memory - err := ctx.Allocator.Deallocate(addr) - if err != nil { - logger.Errorf("[ext_allocator_free_version_1]: %s", err) - panic(err) - } - - return 0 -} - -func ext_hashing_blake2_256_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - dataSpan := vm.GetCurrentFrame().Locals[0] - - data := asMemorySlice(vm.Memory, dataSpan) - - hash, err := common.Blake2bHash(data) - if err != nil { - logger.Errorf("[ext_hashing_blake2_256_version_1]: %s", err) - return 0 - } - - logger.Debugf("data is 0x%x and hash is 0x%x", data, hash) - - out, err := toWasmMemorySized(vm.Memory, hash[:], 32) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return int64(out) -} - -func ext_hashing_twox_128_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - dataSpan := vm.GetCurrentFrame().Locals[0] - data := asMemorySlice(vm.Memory, dataSpan) - - hash, err := common.Twox128Hash(data) - if err != nil { - logger.Errorf("[ext_hashing_twox_128_version_1]: %s", err) - return 0 - } - - logger.Debugf("data is 0x%x and hash is 0x%x", data, hash) - - out, err := toWasmMemorySized(vm.Memory, hash, 16) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return int64(out) -} - -func ext_hashing_twox_64_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - dataSpan := vm.GetCurrentFrame().Locals[0] - data := asMemorySlice(vm.Memory, dataSpan) - - hash, err := common.Twox64(data) - if err != nil { - 
logger.Errorf("[ext_hashing_twox_64_version_1]: %s", err) - return 0 - } - - logger.Debugf("data is 0x%x and hash is 0x%x", data, hash) - - out, err := toWasmMemorySized(vm.Memory, hash, 8) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return int64(out) -} - -func ext_storage_get_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - keySpan := vm.GetCurrentFrame().Locals[0] - storage := ctx.Storage - - key := asMemorySlice(vm.Memory, keySpan) - logger.Debugf("key: 0x%x", key) - - value := storage.Get(key) - logger.Debugf("value: 0x%x", value) - - valueSpan, err := toWasmMemoryOptional(vm.Memory, value) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - ptr, _ := toWasmMemoryOptional(vm.Memory, nil) - return ptr - } - - return valueSpan -} - -func ext_storage_set_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - keySpan := vm.GetCurrentFrame().Locals[0] - valueSpan := vm.GetCurrentFrame().Locals[1] - storage := ctx.Storage - - key := asMemorySlice(vm.Memory, keySpan) - value := asMemorySlice(vm.Memory, valueSpan) - - logger.Infof("key 0x%x and value 0x%x", key, value) - - cp := make([]byte, len(value)) - copy(cp, value) - storage.Set(key, cp) - return 0 -} - -func ext_storage_next_key_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - keySpan := vm.GetCurrentFrame().Locals[0] - storage := ctx.Storage - - key := asMemorySlice(vm.Memory, keySpan) - - next := storage.NextKey(key) - logger.Debugf("key is 0x%x and next is 0x%x", key, next) - - nextSpan, err := toWasmMemoryOptional(vm.Memory, next) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return nextSpan -} - -func ext_storage_clear_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - keySpan := vm.GetCurrentFrame().Locals[0] - storage := ctx.Storage - - key := asMemorySlice(vm.Memory, keySpan) - - logger.Debugf("key: 0x%x", key) - storage.Delete(key) - return 0 -} - -func ext_storage_clear_prefix_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - storage := ctx.Storage - prefixSpan := vm.GetCurrentFrame().Locals[0] - - prefix := asMemorySlice(vm.Memory, prefixSpan) - logger.Debugf("prefix: 0x%x", prefix) - - err := storage.ClearPrefix(prefix) - if err != nil { - logger.Errorf("[ext_storage_clear_prefix_version_1]: %s", err) - } - - // sanity check - next := storage.NextKey(prefix) - if len(next) >= len(prefix) && bytes.Equal(prefix, next[:len(prefix)]) { - panic("did not clear prefix") - } - - return 0 -} - -func ext_storage_exists_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - keySpan := vm.GetCurrentFrame().Locals[0] - storage := ctx.Storage - - key := asMemorySlice(vm.Memory, keySpan) - - val := storage.Get(key) - if len(val) == 0 { - return 0 - } - - return 1 -} - -func ext_storage_read_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - keySpan := vm.GetCurrentFrame().Locals[0] - valueOut := vm.GetCurrentFrame().Locals[1] - offset := int32(vm.GetCurrentFrame().Locals[2]) - storage := ctx.Storage - memory := vm.Memory - - key := asMemorySlice(memory, keySpan) - value := storage.Get(key) - logger.Debugf("key 0x%x and value 0x%x", key, value) - - if value == nil { - ret, _ := toWasmMemoryOptional(memory, nil) - return ret - } - - var size uint32 - - if int(offset) > len(value) { - size = uint32(0) - } else { - size = uint32(len(value[offset:])) - valueBuf, valueLen := 
runtime.Int64ToPointerAndSize(valueOut) - copy(memory[valueBuf:valueBuf+valueLen], value[offset:]) - } - - sizeSpan, err := toWasmMemoryOptionalUint32(memory, &size) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return sizeSpan -} - -func storageAppend(storage runtime.Storage, key, valueToAppend []byte) error { - nextLength := big.NewInt(1) - var valueRes []byte - - // this function assumes the item in storage is a SCALE encoded array of items - // the valueToAppend is a new item, so it appends the item and increases the length prefix by 1 - valueCurr := storage.Get(key) - - if len(valueCurr) == 0 { - valueRes = valueToAppend - } else { - var currLength *big.Int - err := scale.Unmarshal(valueCurr, &currLength) - if err != nil { - logger.Tracef("item in storage is not SCALE encoded, overwriting at key 0x%x", key) - storage.Set(key, append([]byte{4}, valueToAppend...)) - return nil - } - - lengthBytes, err := scale.Marshal(currLength) - if err != nil { - return err - } - // append new item, pop off number of bytes required for length encoding, - // since we're not using old scale.Decoder - valueRes = append(valueCurr[len(lengthBytes):], valueToAppend...) - - // increase length by 1 - nextLength = big.NewInt(0).Add(currLength, big.NewInt(1)) - } - - lengthEnc, err := scale.Marshal(nextLength) - if err != nil { - logger.Tracef("failed to encode new length: %s", err) - return err - } - - // append new length prefix to start of items array - lengthEnc = append(lengthEnc, valueRes...) - logger.Debugf("resulting value: 0x%x", lengthEnc) - storage.Set(key, lengthEnc) - return nil -} - -func ext_storage_append_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - storage := ctx.Storage - keySpan := vm.GetCurrentFrame().Locals[0] - valueSpan := vm.GetCurrentFrame().Locals[1] - - key := asMemorySlice(vm.Memory, keySpan) - logger.Debugf("key 0x%x", key) - valueAppend := asMemorySlice(vm.Memory, valueSpan) - - err := storageAppend(storage, key, valueAppend) - if err != nil { - logger.Errorf("[ext_storage_append_version_1]: %s", err) - } - - return 0 -} - -func ext_trie_blake2_256_ordered_root_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - dataSpan := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - data := asMemorySlice(memory, dataSpan) - - t := trie.NewEmptyTrie() - var v [][]byte - err := scale.Unmarshal(data, &v) - if err != nil { - logger.Errorf("[ext_trie_blake2_256_ordered_root_version_1]: %s", err) - return 0 - } - - for i, val := range v { - key, err := scale.Marshal(big.NewInt(int64(i))) - if err != nil { - logger.Errorf("[ext_blake2_256_enumerated_trie_root]: %s", err) - return 0 - } - logger.Tracef("key 0x%x and value 0x%x", key, val) - - t.Put(key, val) - } - - // allocate memory for value and copy value to memory - ptr, err := ctx.Allocator.Allocate(32) - if err != nil { - logger.Errorf("[ext_trie_blake2_256_ordered_root_version_1]: %s", err) - return 0 - } - - hash, err := t.Hash() - if err != nil { - logger.Errorf("[ext_trie_blake2_256_ordered_root_version_1]: %s", err) - return 0 - } - - logger.Debugf("root hash: %s", hash) - copy(memory[ptr:ptr+32], hash[:]) - return int64(ptr) -} - -func ext_storage_root_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - storage := ctx.Storage - - root, err := storage.Root() - if err != nil { - logger.Errorf("failed to get storage root: %s", err) - return 0 - } - - logger.Debugf("root hash: %s", root) - - rootSpan, err := 
toWasmMemory(vm.Memory, root[:]) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return rootSpan -} - -func ext_storage_changes_root_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - logger.Debug("returning None") - - rootSpan, err := toWasmMemoryOptional(vm.Memory, nil) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return rootSpan -} - -func ext_crypto_start_batch_verify_version_1(_ *exec.VirtualMachine) int64 { - logger.Trace("executing...") - return 0 -} - -func ext_crypto_finish_batch_verify_version_1(_ *exec.VirtualMachine) int64 { - logger.Trace("executing...") - return 1 -} - -func ext_offchain_index_set_version_1(_ *exec.VirtualMachine) int64 { - logger.Trace("executing...") - return 0 -} - -func ext_default_child_storage_set_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - storage := ctx.Storage - memory := vm.Memory - - childStorageKeySpan := vm.GetCurrentFrame().Locals[0] - childStorageKey := asMemorySlice(memory, childStorageKeySpan) - keySpan := vm.GetCurrentFrame().Locals[1] - key := asMemorySlice(memory, keySpan) - valueSpan := vm.GetCurrentFrame().Locals[2] - value := asMemorySlice(memory, valueSpan) - - cp := make([]byte, len(value)) - copy(cp, value) - - err := storage.SetChildStorage(childStorageKey, key, cp) - if err != nil { - logger.Errorf("failed to set value in child storage: %s", err) - return 0 - } - - return 0 -} - -func ext_default_child_storage_get_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - childStorageKey := vm.GetCurrentFrame().Locals[0] - key := vm.GetCurrentFrame().Locals[1] - storage := ctx.Storage - memory := vm.Memory - - child, err := storage.GetChildStorage(asMemorySlice(memory, childStorageKey), asMemorySlice(memory, key)) - if err != nil { - logger.Errorf("failed to get child from child storage: %s", err) - return 0 - } - - value, err := toWasmMemoryOptional(memory, child) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return value -} - -func ext_default_child_storage_read_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - childStorageKey := vm.GetCurrentFrame().Locals[0] - key := vm.GetCurrentFrame().Locals[1] - valueOut := vm.GetCurrentFrame().Locals[2] - offset := vm.GetCurrentFrame().Locals[3] - storage := ctx.Storage - memory := vm.Memory - - value, err := storage.GetChildStorage(asMemorySlice(memory, childStorageKey), asMemorySlice(memory, key)) - if err != nil { - logger.Errorf("failed to get child storage: %s", err) - return 0 - } - - valueBuf, valueLen := runtime.Int64ToPointerAndSize(valueOut) - copy(memory[valueBuf:valueBuf+valueLen], value[offset:]) - - size := uint32(len(value[offset:])) - sizeBuf := make([]byte, 4) - binary.LittleEndian.PutUint32(sizeBuf, size) - - sizeSpan, err := toWasmMemoryOptional(memory, sizeBuf) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return sizeSpan -} - -func ext_default_child_storage_clear_version_1(vm *exec.VirtualMachine) int64 { - logger.Debug("executing...") - - childStorageKey := vm.GetCurrentFrame().Locals[0] - keySpan := vm.GetCurrentFrame().Locals[1] - memory := vm.Memory - storage := ctx.Storage - - keyToChild := asMemorySlice(memory, childStorageKey) - key := asMemorySlice(memory, keySpan) - - err := storage.ClearChildStorage(keyToChild, key) - if err != nil { - logger.Errorf("failed to clear child storage: %s", err) - } - return 0 -} 
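
Every host function above hands results back the same way: the bytes are copied into wasm linear memory, and a single int64 span descriptor packing the pointer and the length crosses the wasm boundary. A minimal standalone sketch of that convention, assuming (as the runtime.Int64ToPointerAndSize / runtime.PointerAndSizeToInt64 names suggest) that the pointer occupies the low 32 bits and the length the high 32 bits:

package main

import "fmt"

// pointerAndSizeToInt64 packs a wasm pointer and a byte length into the
// single int64 span descriptor returned by the host functions above
// (pointer in the low 32 bits, length in the high 32 bits; assumed layout).
func pointerAndSizeToInt64(ptr, size uint32) int64 {
	return int64(uint64(ptr) | uint64(size)<<32)
}

// int64ToPointerAndSize recovers the pointer and length from a span
// descriptor; the runtime applies this to every value read back from wasm.
func int64ToPointerAndSize(span int64) (ptr, size uint32) {
	return uint32(uint64(span)), uint32(uint64(span) >> 32)
}

func main() {
	span := pointerAndSizeToInt64(0x1000, 32) // 32 bytes at offset 0x1000
	ptr, size := int64ToPointerAndSize(span)
	fmt.Printf("span=%#x ptr=%#x size=%d\n", span, ptr, size)
	// prints: span=0x2000001000 ptr=0x1000 size=32
}

The same descriptor appears on the argument side too (asMemorySlice above decodes one), so a single int64 is enough to move arbitrary byte strings in either direction across the wasm boundary.
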
- -func ext_default_child_storage_storage_kill_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - childStorageKeySpan := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - storage := ctx.Storage - - childStorageKey := asMemorySlice(memory, childStorageKeySpan) - storage.DeleteChild(childStorageKey) - return 0 -} - -func ext_default_child_storage_exists_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - childStorageKey := vm.GetCurrentFrame().Locals[0] - key := vm.GetCurrentFrame().Locals[1] - storage := ctx.Storage - memory := vm.Memory - - child, err := storage.GetChildStorage(asMemorySlice(memory, childStorageKey), asMemorySlice(memory, key)) - if err != nil { - logger.Errorf("failed to get child from child storage: %s", err) - return 0 - } - if child != nil { - return 1 - } - return 0 -} - -func ext_default_child_storage_clear_prefix_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - childStorageKey := vm.GetCurrentFrame().Locals[0] - prefixSpan := vm.GetCurrentFrame().Locals[1] - storage := ctx.Storage - memory := vm.Memory - - keyToChild := asMemorySlice(memory, childStorageKey) - prefix := asMemorySlice(memory, prefixSpan) - - err := storage.ClearPrefixInChild(keyToChild, prefix) - if err != nil { - logger.Errorf("failed to clear prefix in child: %s", err) - } - return 0 -} - -func ext_default_child_storage_root_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - childStorageKey := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - storage := ctx.Storage - - child, err := storage.GetChild(asMemorySlice(memory, childStorageKey)) - if err != nil { - logger.Errorf("failed to retrieve child: %s", err) - return 0 - } - - childRoot, err := child.Hash() - if err != nil { - logger.Errorf("failed to encode child root: %s", err) - return 0 - } - - root, err := toWasmMemoryOptional(memory, childRoot[:]) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return root -} - -func ext_default_child_storage_next_key_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - childStorageKey := vm.GetCurrentFrame().Locals[0] - key := vm.GetCurrentFrame().Locals[1] - memory := vm.Memory - storage := ctx.Storage - - child, err := storage.GetChildNextKey(asMemorySlice(memory, childStorageKey), asMemorySlice(memory, key)) - if err != nil { - logger.Errorf("failed to get child's next key: %s", err) - return 0 - } - - value, err := toWasmMemoryOptional(memory, child) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return value -} - -func ext_crypto_ed25519_public_keys_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - keyTypeID := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - - id := memory[keyTypeID : keyTypeID+4] - - ks, err := ctx.Keystore.GetKeystore(id) - if err != nil { - logger.Warnf("error for id 0x%x: %s", id, err) - ret, _ := toWasmMemory(memory, []byte{0}) - return ret - } - - if ks.Type() != crypto.Ed25519Type && ks.Type() != crypto.UnknownType { - logger.Warnf( - "keystore type for id 0x%x is %s and not the expected ed25519", - id, ks.Type()) - ret, _ := toWasmMemory(memory, []byte{0}) - return ret - } - - keys := ks.PublicKeys() - - var encodedKeys []byte - for _, key := range keys { - encodedKeys = append(encodedKeys, key.Encode()...) 
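		// Each public key is appended raw (32 bytes apiece); the SCALE length
		// prefix computed below turns the buffer into an encoded vector of keys.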
- } - - prefix, err := scale.Marshal(big.NewInt(int64(len(keys)))) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - ret, _ := toWasmMemory(memory, []byte{0}) - return ret - } - - ret, err := toWasmMemory(memory, append(prefix, encodedKeys...)) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - ret, _ = toWasmMemory(memory, []byte{0}) - return ret - } - - return ret -} - -func ext_crypto_ed25519_generate_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - keyTypeID := vm.GetCurrentFrame().Locals[0] - seedSpan := vm.GetCurrentFrame().Locals[1] - memory := vm.Memory - - id := memory[keyTypeID : keyTypeID+4] - seedBytes := asMemorySlice(memory, seedSpan) - - var seed *[]byte - err := scale.Unmarshal(seedBytes, &seed) - if err != nil { - logger.Warnf("cannot generate key: %s", err) - return 0 - } - - var kp crypto.Keypair - - if seed != nil { - kp, err = ed25519.NewKeypairFromMnenomic(string(*seed), "") - } else { - kp, err = ed25519.GenerateKeypair() - } - - if err != nil { - logger.Warnf("cannot generate key: %s", err) - return 0 - } - - ks, err := ctx.Keystore.GetKeystore(id) - if err != nil { - logger.Warnf("error for id 0x%x: %s", id, err) - return 0 - } - - err = ks.Insert(kp) - if err != nil { - logger.Warnf("failed to insert key: %s", err) - return 0 - } - - ret, err := toWasmMemorySized(memory, kp.Public().Encode(), 32) - if err != nil { - logger.Warnf("failed to allocate memory: %s", err) - return 0 - } - - logger.Debug( - "generated ed25519 keypair with resulting public key: " + - kp.Public().Hex()) - return int64(ret) -} - -func ext_crypto_ed25519_sign_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - keyTypeID := vm.GetCurrentFrame().Locals[0] - key := vm.GetCurrentFrame().Locals[1] - msg := vm.GetCurrentFrame().Locals[2] - memory := vm.Memory - - id := memory[keyTypeID : keyTypeID+4] - - pubKeyData := memory[key : key+32] - pubKey, err := ed25519.NewPublicKey(pubKeyData) - if err != nil { - logger.Errorf("failed to get public keys: %s", err) - return 0 - } - - ks, err := ctx.Keystore.GetKeystore(id) - if err != nil { - logger.Warnf("error for id 0x%x: %s", id, err) - ret, _ := toWasmMemoryOptional(memory, nil) - return ret - } - - var ret int64 - signingKey := ks.GetKeypair(pubKey) - if signingKey == nil { - logger.Error("could not find public key " + pubKey.Hex() + " in keystore") - ret, err = toWasmMemoryOptional(memory, nil) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - return 0 - } - return ret - } - - sig, err := signingKey.Sign(asMemorySlice(memory, msg)) - if err != nil { - logger.Error("could not sign message") - } - - ret, err = toWasmMemoryFixedSizeOptional(memory, sig) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - return 0 - } - - return ret -} - -func ext_crypto_ed25519_verify_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - sig := vm.GetCurrentFrame().Locals[0] - msg := vm.GetCurrentFrame().Locals[1] - key := vm.GetCurrentFrame().Locals[2] - memory := vm.Memory - sigVerifier := ctx.SigVerifier - - signature := memory[sig : sig+64] - message := asMemorySlice(memory, msg) - pubKeyData := memory[key : key+32] - - pubKey, err := ed25519.NewPublicKey(pubKeyData) - if err != nil { - logger.Error("failed to create public key") - return 0 - } - - if sigVerifier.IsStarted() { - signature := crypto.SignatureInfo{ - PubKey: pubKey.Encode(), - Sign: signature, - Msg: message, - VerifyFunc: 
ed25519.VerifySignature, - } - sigVerifier.Add(&signature) - return 1 - } - - if ok, err := pubKey.Verify(message, signature); err != nil || !ok { - logger.Error("failed to verify") - return 0 - } - - logger.Debug("verified ed25519 signature") - return 1 -} - -func ext_crypto_sr25519_public_keys_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - keyTypeID := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - - id := memory[keyTypeID : keyTypeID+4] - - ks, err := ctx.Keystore.GetKeystore(id) - if err != nil { - logger.Warnf("error for id 0x%x: %s", id, err) - ret, _ := toWasmMemory(memory, []byte{0}) - return ret - } - - if ks.Type() != crypto.Sr25519Type && ks.Type() != crypto.UnknownType { - logger.Warnf( - "keystore type for id 0x%x is %s and not the expected sr25519", - id, ks.Type()) - ret, _ := toWasmMemory(memory, []byte{0}) - return ret - } - - keys := ks.PublicKeys() - - var encodedKeys []byte - for _, key := range keys { - encodedKeys = append(encodedKeys, key.Encode()...) - } - - prefix, err := scale.Marshal(big.NewInt(int64(len(keys)))) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - ret, _ := toWasmMemory(memory, []byte{0}) - return ret - } - - ret, err := toWasmMemory(memory, append(prefix, encodedKeys...)) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - ret, _ = toWasmMemory(memory, []byte{0}) - return ret - } - - return ret -} - -func ext_crypto_sr25519_generate_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - keyTypeID := vm.GetCurrentFrame().Locals[0] - seedSpan := vm.GetCurrentFrame().Locals[1] - memory := vm.Memory - - id := memory[keyTypeID : keyTypeID+4] - seedBytes := asMemorySlice(memory, seedSpan) - - var seed *[]byte - err := scale.Unmarshal(seedBytes, &seed) - if err != nil { - logger.Warnf("cannot generate key: %s", err) - return 0 - } - - var kp crypto.Keypair - if seed != nil { - kp, err = sr25519.NewKeypairFromMnenomic(string(*seed), "") - } else { - kp, err = sr25519.GenerateKeypair() - } - - if err != nil { - logger.Tracef("cannot generate key: %s", err) - panic(err) - } - - ks, err := ctx.Keystore.GetKeystore(id) - if err != nil { - logger.Warnf("error for id 0x%x: %s", id, err) - return 0 - } - - err = ks.Insert(kp) - if err != nil { - logger.Warnf("failed to insert key: %s", err) - return 0 - } - - ret, err := toWasmMemorySized(memory, kp.Public().Encode(), 32) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - return 0 - } - - logger.Debug( - "generated sr25519 keypair with resulting public key: " + - kp.Public().Hex()) - return int64(ret) -} - -func ext_crypto_sr25519_sign_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - keyTypeID := vm.GetCurrentFrame().Locals[0] - key := vm.GetCurrentFrame().Locals[1] - msg := vm.GetCurrentFrame().Locals[2] - memory := vm.Memory - - emptyRet, _ := toWasmMemoryOptional(memory, nil) - - id := memory[keyTypeID : keyTypeID+4] - - ks, err := ctx.Keystore.GetKeystore(id) - if err != nil { - logger.Warnf("error for id 0x%x: %s", id, err) - return emptyRet - } - - var ret int64 - pubKey, err := sr25519.NewPublicKey(memory[key : key+32]) - if err != nil { - logger.Errorf("failed to get public key: %s", err) - return emptyRet - } - - signingKey := ks.GetKeypair(pubKey) - if signingKey == nil { - logger.Error("could not find public key " + pubKey.Hex() + " in keystore") - return emptyRet - } - - msgData := asMemorySlice(memory, msg) - sig, err := 
signingKey.Sign(msgData) - if err != nil { - logger.Errorf("could not sign message: %s", err) - return emptyRet - } - - ret, err = toWasmMemoryFixedSizeOptional(memory, sig) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - return emptyRet - } - - return ret -} - -func ext_crypto_sr25519_verify_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - sig := vm.GetCurrentFrame().Locals[0] - msg := vm.GetCurrentFrame().Locals[1] - key := vm.GetCurrentFrame().Locals[2] - memory := vm.Memory - sigVerifier := ctx.SigVerifier - - message := asMemorySlice(memory, msg) - signature := memory[sig : sig+64] - - pub, err := sr25519.NewPublicKey(memory[key : key+32]) - if err != nil { - logger.Error("invalid sr25519 public key") - return 0 - } - - logger.Debugf( - "pub=%s; message=0x%x; signature=0x%x", - pub.Hex(), message, signature) - - if sigVerifier.IsStarted() { - signature := crypto.SignatureInfo{ - PubKey: pub.Encode(), - Sign: signature, - Msg: message, - VerifyFunc: sr25519.VerifySignature, - } - sigVerifier.Add(&signature) - return 1 - } - - if ok, err := pub.VerifyDeprecated(message, signature); err != nil || !ok { - logger.Debugf("failed to validate signature: %s", err) - // this fails at block 3876, however based on discussions this seems to be expected - return 1 - } - - logger.Debug("verified sr25519 signature") - return 1 -} - -func ext_crypto_secp256k1_ecdsa_recover_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - sig := vm.GetCurrentFrame().Locals[0] - msg := vm.GetCurrentFrame().Locals[1] - memory := vm.Memory - - // msg must be the 32-byte hash of the message to be signed. - // sig must be a 65-byte compact ECDSA signature containing the - // recovery id as the last element - message := memory[msg : msg+32] - signature := memory[sig : sig+65] - - if signature[64] == 27 { - signature[64] = 0 - } - - if signature[64] == 28 { - signature[64] = 1 - } - - pub, err := secp256k1.RecoverPublicKey(message, signature) - if err != nil { - logger.Errorf("failed to recover public key: %s", err) - var ret int64 - ret, err = toWasmMemoryResult(memory, nil) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - return 0 - } - return ret - } - - logger.Debugf( - "recovered public key of length %d: 0x%x", - len(pub), pub) - - ret, err := toWasmMemoryResult(memory, pub[1:]) - if err != nil { - logger.Errorf("failed to allocate memory: %s", err) - return 0 - } - - return ret -} - -func ext_hashing_keccak_256_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - dataSpan := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - - data := asMemorySlice(memory, dataSpan) - - hash, err := common.Keccak256(data) - if err != nil { - logger.Errorf("[ext_hashing_keccak_256_version_1]: %s", err) - return 0 - } - - logger.Debugf("data 0x%x has hash %s", data, hash) - - out, err := toWasmMemorySized(memory, hash[:], 32) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return int64(out) -} - -func ext_hashing_sha2_256_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - dataSpan := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - - data := asMemorySlice(memory, dataSpan) - hash := common.Sha256(data) - - logger.Debugf("data 0x%x hash hash %x", data, hash) - - out, err := toWasmMemorySized(memory, hash[:], 32) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return int64(out) -} - -func 
ext_hashing_blake2_128_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - dataSpan := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - - data := asMemorySlice(memory, dataSpan) - - hash, err := common.Blake2b128(data) - if err != nil { - logger.Errorf("[ext_hashing_blake2_128_version_1]: %s", err) - return 0 - } - - logger.Debugf("data 0x%x has hash 0x%x", data, hash) - - out, err := toWasmMemorySized(memory, hash, 16) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return int64(out) -} - -func ext_hashing_twox_256_version_1(vm *exec.VirtualMachine) int64 { - logger.Trace("executing...") - - dataSpan := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - - data := asMemorySlice(memory, dataSpan) - - hash, err := common.Twox256(data) - if err != nil { - logger.Errorf("[ext_hashing_twox_256_version_1]: %s", err) - return 0 - } - - logger.Debugf("data 0x%x has hash %s", data, hash) - - out, err := toWasmMemorySized(memory, hash[:], 32) - if err != nil { - logger.Errorf("failed to allocate: %s", err) - return 0 - } - - return int64(out) -} - -func ext_trie_blake2_256_root_version_1(vm *exec.VirtualMachine) int64 { - logger.Debug("executing...") - - dataSpan := vm.GetCurrentFrame().Locals[0] - memory := vm.Memory - - data := asMemorySlice(memory, dataSpan) - - t := trie.NewEmptyTrie() - - // this function is expecting an array of (key, value) tuples - type kv struct { - Key, Value []byte - } - - var kvs []kv - if err := scale.Unmarshal(data, &kvs); err != nil { - logger.Errorf("[ext_trie_blake2_256_root_version_1]: %s", err) - return 0 - } - - for _, kv := range kvs { - t.Put(kv.Key, kv.Value) - } - - // allocate memory for value and copy value to memory - ptr, err := ctx.Allocator.Allocate(32) - if err != nil { - logger.Errorf("[ext_trie_blake2_256_root_version_1]: %s", err) - return 0 - } - - hash, err := t.Hash() - if err != nil { - logger.Errorf("[ext_trie_blake2_256_root_version_1]: %s", err) - return 0 - } - - logger.Debugf("root hash: %s", hash) - copy(memory[ptr:ptr+32], hash[:]) - return int64(ptr) -} - -// Convert 64bit wasm span descriptor to Go memory slice -func asMemorySlice(memory []byte, span int64) []byte { - ptr, size := runtime.Int64ToPointerAndSize(span) - return memory[ptr : ptr+size] -} - -// Copy a byte slice of a fixed size to wasm memory and return resulting pointer -func toWasmMemorySized(memory, data []byte, size uint32) (uint32, error) { - if int(size) != len(data) { - return 0, errors.New("internal byte array size missmatch") - } - - allocator := ctx.Allocator - out, err := allocator.Allocate(size) - if err != nil { - return 0, err - } - - copy(memory[out:out+size], data) - return out, nil -} - -// Wraps slice in optional.Bytes and copies result to wasm memory. Returns resulting 64bit span descriptor -func toWasmMemoryOptional(memory, data []byte) (int64, error) { - var opt *[]byte - if data != nil { - opt = &data - } - - enc, err := scale.Marshal(opt) - if err != nil { - return 0, err - } - - return toWasmMemory(memory, enc) -} - -// Copy a byte slice to wasm memory and return the resulting 64bit span descriptor -func toWasmMemory(memory, data []byte) (int64, error) { - allocator := ctx.Allocator - size := uint32(len(data)) - - out, err := allocator.Allocate(size) - if err != nil { - return 0, err - } - - copy(memory[out:out+size], data) - return runtime.PointerAndSizeToInt64(int32(out), int32(size)), nil -} - -// Wraps slice in optional and copies result to wasm memory. 
Returns resulting 64bit span descriptor -func toWasmMemoryOptionalUint32(memory []byte, data *uint32) (int64, error) { - var opt *uint32 - if data != nil { - temp := *data - opt = &temp - } - - enc, err := scale.Marshal(opt) - if err != nil { - return int64(0), err - } - return toWasmMemory(memory, enc) -} - -// Wraps slice in optional.FixedSizeBytes and copies result to wasm memory. Returns resulting 64bit span descriptor -func toWasmMemoryFixedSizeOptional(memory, data []byte) (int64, error) { - var opt [64]byte - copy(opt[:], data[:]) - enc, err := scale.Marshal(&opt) - if err != nil { - return 0, err - } - return toWasmMemory(memory, enc) -} - -// Wraps slice in Result type and copies result to wasm memory. Returns resulting 64bit span descriptor -func toWasmMemoryResult(memory, data []byte) (int64, error) { - var res *rtype.Result - if len(data) == 0 { - res = rtype.NewResult(byte(1), nil) - } else { - res = rtype.NewResult(byte(0), data) - } - - enc, err := res.Encode() - if err != nil { - return 0, err - } - - return toWasmMemory(memory, enc) -} diff --git a/lib/runtime/life/resolver_test.go b/lib/runtime/life/resolver_test.go deleted file mode 100644 index f334c05b4e..0000000000 --- a/lib/runtime/life/resolver_test.go +++ /dev/null @@ -1,1007 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package life - -import ( - "bytes" - "encoding/binary" - "sort" - "testing" - - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/types" - "github.com/ChainSafe/gossamer/lib/crypto" - "github.com/ChainSafe/gossamer/lib/crypto/ed25519" - "github.com/ChainSafe/gossamer/lib/crypto/secp256k1" - "github.com/ChainSafe/gossamer/lib/crypto/sr25519" - "github.com/ChainSafe/gossamer/lib/keystore" - "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/trie" - "github.com/ChainSafe/gossamer/pkg/scale" - "github.com/stretchr/testify/require" -) - -var testChildKey = []byte("childKey") -var testKey = []byte("key") -var testValue = []byte("value") - -func Test_ext_allocator_malloc_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - size := make([]byte, 4) - binary.LittleEndian.PutUint32(size, 1) - enc, err := scale.Marshal(size) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_allocator_malloc_version_1", enc) - require.NoError(t, err) - - var res []byte - err = scale.Unmarshal(ret, &res) - require.NoError(t, err) - require.Equal(t, size, res) -} - -func Test_ext_hashing_blake2_256_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - data := []byte("helloworld") - enc, err := scale.Marshal(data) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_hashing_blake2_256_version_1", enc) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - expected, err := common.Blake2bHash(data) - require.NoError(t, err) - require.Equal(t, expected[:], hash) -} - -func Test_ext_hashing_twox_128_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - data := []byte("helloworld") - enc, err := scale.Marshal(data) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_hashing_twox_128_version_1", enc) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - expected, err := common.Twox128Hash(data) - require.NoError(t, err) - 
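	// The host call must agree byte for byte with the native Go Twox128
	// implementation.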
require.Equal(t, expected[:], hash) -} - -func Test_ext_storage_get_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - testvalue := []byte{1, 2} - ctx.Storage.Set(testkey, testvalue) - - enc, err := scale.Marshal(testkey) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_storage_get_version_1", enc) - require.NoError(t, err) - - var value *[]byte - err = scale.Unmarshal(ret, &value) - require.NoError(t, err) - require.NotNil(t, value) - require.Equal(t, testvalue, *value) -} - -func Test_ext_storage_set_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - testvalue := []byte("washere") - - encKey, err := scale.Marshal(testkey) - require.NoError(t, err) - encValue, err := scale.Marshal(testvalue) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_storage_set_version_1", append(encKey, encValue...)) - require.NoError(t, err) - - val := ctx.Storage.Get(testkey) - require.Equal(t, testvalue, val) -} - -func Test_ext_storage_next_key_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - ctx.Storage.Set(testkey, []byte{1}) - - nextkey := []byte("oot") - ctx.Storage.Set(nextkey, []byte{1}) - - enc, err := scale.Marshal(testkey) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_storage_next_key_version_1", enc) - require.NoError(t, err) - - var next *[]byte - err = scale.Unmarshal(ret, &next) - require.NoError(t, err) - require.NotNil(t, next) - require.Equal(t, nextkey, *next) -} - -func Test_ext_hashing_twox_64_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - data := []byte("helloworld") - enc, err := scale.Marshal(data) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_hashing_twox_64_version_1", enc) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - expected, err := common.Twox64(data) - require.NoError(t, err) - require.Equal(t, expected[:], hash) -} - -func Test_ext_storage_clear_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - ctx.Storage.Set(testkey, []byte{1}) - - enc, err := scale.Marshal(testkey) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_storage_clear_version_1", enc) - require.NoError(t, err) - - val := ctx.Storage.Get(testkey) - require.Nil(t, val) -} - -func Test_ext_storage_clear_prefix_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - ctx.Storage.Set(testkey, []byte{1}) - - testkey2 := []byte("spaghet") - ctx.Storage.Set(testkey2, []byte{2}) - - enc, err := scale.Marshal(testkey[:3]) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_storage_clear_prefix_version_1", enc) - require.NoError(t, err) - - val := ctx.Storage.Get(testkey) - require.Nil(t, val) - - val = ctx.Storage.Get(testkey2) - require.NotNil(t, val) -} - -func Test_ext_storage_append_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - testvalue := []byte("was") - testvalueAppend := []byte("here") - - encKey1, err := scale.Marshal(testkey) - require.NoError(t, err) - - encVal1, err := scale.Marshal(testvalue) - require.NoError(t, err) - - doubleEncVal1, err := scale.Marshal(encVal1) - require.NoError(t, err) - - encArr1, err := scale.Marshal([][]byte{testvalue}) - 
require.NoError(t, err) - - // place SCALE encoded value in storage - _, err = inst.Exec("rtm_ext_storage_append_version_1", append(encKey1, doubleEncVal1...)) - require.NoError(t, err) - - val := ctx.Storage.Get(testkey) - require.Equal(t, encArr1, val) - - encValueAppend1, err := scale.Marshal(testvalueAppend) - require.NoError(t, err) - - doubleEncValueAppend1, err := scale.Marshal(encValueAppend1) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_storage_append_version_1", append(encKey1, doubleEncValueAppend1...)) - require.NoError(t, err) - - ret := ctx.Storage.Get(testkey) - require.NotNil(t, ret) - - var dec1 [][]byte - err = scale.Unmarshal(ret, &dec1) - require.NoError(t, err) - - require.Equal(t, 2, len(dec1)) - require.Equal(t, testvalue, dec1[0]) - require.Equal(t, testvalueAppend, dec1[1]) - - expected1, err := scale.Marshal([][]byte{testvalue, testvalueAppend}) - require.NoError(t, err) - require.Equal(t, expected1, ret) -} - -func Test_ext_trie_blake2_256_ordered_root_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testvalues := []string{"static", "even-keeled", "Future-proofed"} - encValues, err := scale.Marshal(testvalues) - require.NoError(t, err) - - res, err := inst.Exec("rtm_ext_trie_blake2_256_ordered_root_version_1", encValues) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(res, &hash) - require.NoError(t, err) - - expected := common.MustHexToHash("0xd847b86d0219a384d11458e829e9f4f4cce7e3cc2e6dcd0e8a6ad6f12c64a737") - require.Equal(t, expected[:], hash) -} - -func Test_ext_storage_root_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - testvalue := []byte("washere") - - encKey, err := scale.Marshal(testkey) - require.NoError(t, err) - encValue, err := scale.Marshal(testvalue) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_storage_set_version_1", append(encKey, encValue...)) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_storage_root_version_1", []byte{}) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - tt := trie.NewEmptyTrie() - tt.Put([]byte("noot"), []byte("washere")) - - expected := tt.MustHash() - require.Equal(t, expected[:], hash) -} - -func Test_ext_storage_exists_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testkey := []byte("noot") - testvalue := []byte{1, 2} - ctx.Storage.Set(testkey, testvalue) - - enc, err := scale.Marshal(testkey) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_storage_exists_version_1", enc) - require.NoError(t, err) - require.Equal(t, byte(1), ret[0]) - - nonexistent := []byte("none") - enc, err = scale.Marshal(nonexistent) - require.NoError(t, err) - - ret, err = inst.Exec("rtm_ext_storage_exists_version_1", enc) - require.NoError(t, err) - require.Equal(t, byte(0), ret[0]) -} - -func Test_ext_default_child_storage_set_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - // Check if value is not set - val, err := ctx.Storage.GetChildStorage(testChildKey, testKey) - require.NoError(t, err) - require.Nil(t, val) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - encKey, err := scale.Marshal(testKey) - require.NoError(t, err) - - encVal, err := scale.Marshal(testValue) - require.NoError(t, err) - 
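
For context on the storage-append test above: the appended item is itself SCALE-encoded data, and the host-call argument buffer wraps it in another SCALE encoding, hence the double `scale.Marshal`; after two appends the stored entry decodes as a SCALE `[][]byte`, as the test's assertions show. A hedged, self-contained sketch of that double encoding (output bytes are whatever the gossamer `scale` package produces):

```go
package main

import (
	"fmt"

	"github.com/ChainSafe/gossamer/pkg/scale"
)

func main() {
	value := []byte("was")

	// first encoding: the SCALE-encoded value the runtime appends to storage
	encValue, err := scale.Marshal(value)
	if err != nil {
		panic(err)
	}

	// second encoding: wraps the encoded value for the host-call argument buffer
	doubleEncValue, err := scale.Marshal(encValue)
	if err != nil {
		panic(err)
	}

	fmt.Printf("value:          %x\n", value)
	fmt.Printf("encoded:        %x\n", encValue)
	fmt.Printf("double encoded: %x\n", doubleEncValue)
}
```
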
- _, err = inst.Exec("rtm_ext_default_child_storage_set_version_1", append(append(encChildKey, encKey...), encVal...)) - require.NoError(t, err) - - val, err = ctx.Storage.GetChildStorage(testChildKey, testKey) - require.NoError(t, err) - require.Equal(t, testValue, val) -} - -func Test_ext_default_child_storage_get_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - err = ctx.Storage.SetChildStorage(testChildKey, testKey, testValue) - require.NoError(t, err) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - encKey, err := scale.Marshal(testKey) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_default_child_storage_get_version_1", append(encChildKey, encKey...)) - require.NoError(t, err) - - var read *[]byte - err = scale.Unmarshal(ret, &read) - require.NoError(t, err) - require.NotNil(t, read) -} - -func Test_ext_default_child_storage_read_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - err = ctx.Storage.SetChildStorage(testChildKey, testKey, testValue) - require.NoError(t, err) - - testOffset := uint32(2) - testBufferSize := uint32(100) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - encKey, err := scale.Marshal(testKey) - require.NoError(t, err) - - encBufferSize, err := scale.Marshal(testBufferSize) - require.NoError(t, err) - - encOffset, err := scale.Marshal(testOffset) - require.NoError(t, err) - - ret, err := inst.Exec( - "rtm_ext_default_child_storage_read_version_1", - append(append(encChildKey, encKey...), - append(encOffset, encBufferSize...)...)) - require.NoError(t, err) - - var read *[]byte - err = scale.Unmarshal(ret, &read) - require.NoError(t, err) - require.NotNil(t, read) - - val := *read - require.Equal(t, testValue[testOffset:], val[:len(testValue)-int(testOffset)]) -} - -func Test_ext_default_child_storage_clear_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - err = ctx.Storage.SetChildStorage(testChildKey, testKey, testValue) - require.NoError(t, err) - - // Confirm if value is set - val, err := ctx.Storage.GetChildStorage(testChildKey, testKey) - require.NoError(t, err) - require.Equal(t, testValue, val) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - encKey, err := scale.Marshal(testKey) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_default_child_storage_clear_version_1", append(encChildKey, encKey...)) - require.NoError(t, err) - - val, err = ctx.Storage.GetChildStorage(testChildKey, testKey) - require.NoError(t, err) - require.Nil(t, val) -} - -func Test_ext_default_child_storage_storage_kill_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - // Confirm if value is set - child, err := ctx.Storage.GetChild(testChildKey) - require.NoError(t, err) - require.NotNil(t, child) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_default_child_storage_storage_kill_version_1", encChildKey) - require.NoError(t, err) - - child, _ = ctx.Storage.GetChild(testChildKey) - 
require.Nil(t, child) -} - -func Test_ext_default_child_storage_exists_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - err = ctx.Storage.SetChildStorage(testChildKey, testKey, testValue) - require.NoError(t, err) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - encKey, err := scale.Marshal(testKey) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_default_child_storage_exists_version_1", append(encChildKey, encKey...)) - require.NoError(t, err) - - var read *[]byte - err = scale.Unmarshal(ret, &read) - require.NoError(t, err) - require.NotNil(t, read) -} - -func Test_ext_default_child_storage_clear_prefix_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - prefix := []byte("key") - - testKeyValuePair := []struct { - key []byte - value []byte - }{ - {[]byte("keyOne"), []byte("value1")}, - {[]byte("keyTwo"), []byte("value2")}, - {[]byte("keyThree"), []byte("value3")}, - } - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - for _, kv := range testKeyValuePair { - err = ctx.Storage.SetChildStorage(testChildKey, kv.key, kv.value) - require.NoError(t, err) - } - - // Confirm if value is set - keys, err := ctx.Storage.(*storage.TrieState).GetKeysWithPrefixFromChild(testChildKey, prefix) - require.NoError(t, err) - require.Equal(t, 3, len(keys)) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - encPrefix, err := scale.Marshal(prefix) - require.NoError(t, err) - - _, err = inst.Exec("rtm_ext_default_child_storage_clear_prefix_version_1", append(encChildKey, encPrefix...)) - require.NoError(t, err) - - keys, err = ctx.Storage.(*storage.TrieState).GetKeysWithPrefixFromChild(testChildKey, prefix) - require.NoError(t, err) - require.Equal(t, 0, len(keys)) -} - -func Test_ext_default_child_storage_root_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - err = ctx.Storage.SetChildStorage(testChildKey, testKey, testValue) - require.NoError(t, err) - - child, err := ctx.Storage.GetChild(testChildKey) - require.NoError(t, err) - - rootHash, err := child.Hash() - require.NoError(t, err) - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - encKey, err := scale.Marshal(testKey) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_default_child_storage_root_version_1", append(encChildKey, encKey...)) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - // Convert decoded interface to common Hash - actualValue := common.BytesToHash(hash) - require.Equal(t, rootHash, actualValue) -} - -func Test_ext_default_child_storage_next_key_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testKeyValuePair := []struct { - key []byte - value []byte - }{ - {[]byte("apple"), []byte("value1")}, - {[]byte("key"), []byte("value2")}, - } - - key := testKeyValuePair[0].key - - err := ctx.Storage.SetChild(testChildKey, trie.NewEmptyTrie()) - require.NoError(t, err) - - for _, kv := range testKeyValuePair { - err = ctx.Storage.SetChildStorage(testChildKey, kv.key, kv.value) - require.NoError(t, err) - } - - encChildKey, err := scale.Marshal(testChildKey) - require.NoError(t, err) - - 
encKey, err := scale.Marshal(key) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_default_child_storage_next_key_version_1", append(encChildKey, encKey...)) - require.NoError(t, err) - - var read *[]byte - err = scale.Unmarshal(ret, &read) - require.NoError(t, err) - require.NotNil(t, read) -} - -func Test_ext_crypto_ed25519_public_keys_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - idData := []byte(keystore.DumyName) - ks, _ := ctx.Keystore.GetKeystore(idData) - require.Equal(t, 0, ks.Size()) - - size := 5 - pubKeys := make([][32]byte, size) - for i := range pubKeys { - kp, err := ed25519.GenerateKeypair() - require.NoError(t, err) - - ks.Insert(kp) - copy(pubKeys[i][:], kp.Public().Encode()) - } - - sort.Slice(pubKeys, func(i int, j int) bool { return pubKeys[i][0] < pubKeys[j][0] }) - - res, err := inst.Exec("rtm_ext_crypto_ed25519_public_keys_version_1", idData) - require.NoError(t, err) - - var out []byte - err = scale.Unmarshal(res, &out) - require.NoError(t, err) - - var ret [][32]byte - err = scale.Unmarshal(out, &ret) - require.NoError(t, err) - - sort.Slice(ret, func(i int, j int) bool { return ret[i][0] < ret[j][0] }) - require.Equal(t, pubKeys, ret) -} - -func Test_ext_crypto_ed25519_generate_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - idData := []byte(keystore.AccoName) - ks, _ := ctx.Keystore.GetKeystore(idData) - require.Equal(t, 0, ks.Size()) - - mnemonic, err := crypto.NewBIP39Mnemonic() - require.NoError(t, err) - - mnemonicBytes := []byte(mnemonic) - var data = &mnemonicBytes - seedData, err := scale.Marshal(data) - require.NoError(t, err) - - params := append(idData, seedData...) - - ret, err := inst.Exec("rtm_ext_crypto_ed25519_generate_version_1", params) - require.NoError(t, err) - - var out []byte - err = scale.Unmarshal(ret, &out) - require.NoError(t, err) - - pubKey, err := ed25519.NewPublicKey(out) - require.NoError(t, err) - require.Equal(t, 1, ks.Size()) - kp := ks.GetKeypair(pubKey) - require.NotNil(t, kp) -} - -func Test_ext_crypto_ed25519_sign_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - kp, err := ed25519.GenerateKeypair() - require.NoError(t, err) - - idData := []byte(keystore.AccoName) - ks, _ := ctx.Keystore.GetKeystore(idData) - ks.Insert(kp) - - pubKeyData := kp.Public().Encode() - encPubKey, err := scale.Marshal(pubKeyData) - require.NoError(t, err) - - msgData := []byte("Hello world!") - encMsg, err := scale.Marshal(msgData) - require.NoError(t, err) - - res, err := inst.Exec("rtm_ext_crypto_ed25519_sign_version_1", append(append(idData, encPubKey...), encMsg...)) - require.NoError(t, err) - - var out []byte - err = scale.Unmarshal(res, &out) - require.NoError(t, err) - - var val *[64]byte - err = scale.Unmarshal(out, &val) - require.NoError(t, err) - require.NotNil(t, val) - - value := make([]byte, 64) - copy(value[:], val[:]) - - ok, err := kp.Public().Verify(msgData, value) - require.NoError(t, err) - require.True(t, ok) -} - -func Test_ext_crypto_ed25519_verify_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - kp, err := ed25519.GenerateKeypair() - require.NoError(t, err) - - idData := []byte(keystore.AccoName) - ks, _ := ctx.Keystore.GetKeystore(idData) - ks.Insert(kp) - - pubKeyData := kp.Public().Encode() - encPubKey, err := scale.Marshal(pubKeyData) - require.NoError(t, err) - - msgData := []byte("Hello world!") - encMsg, err := scale.Marshal(msgData) - 
require.NoError(t, err) - - sign, err := kp.Private().Sign(msgData) - require.NoError(t, err) - encSign, err := scale.Marshal(sign) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_crypto_ed25519_verify_version_1", append(append(encSign, encMsg...), encPubKey...)) - require.NoError(t, err) - - var read *[]byte - err = scale.Unmarshal(ret, &read) - require.NoError(t, err) - require.NotNil(t, read) -} - -func Test_ext_crypto_sr25519_public_keys_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - idData := []byte(keystore.DumyName) - ks, _ := ctx.Keystore.GetKeystore(idData) - require.Equal(t, 0, ks.Size()) - - size := 5 - pubKeys := make([][32]byte, size) - for i := range pubKeys { - kp, err := sr25519.GenerateKeypair() - require.NoError(t, err) - - ks.Insert(kp) - copy(pubKeys[i][:], kp.Public().Encode()) - } - - sort.Slice(pubKeys, func(i int, j int) bool { return pubKeys[i][0] < pubKeys[j][0] }) - - res, err := inst.Exec("rtm_ext_crypto_sr25519_public_keys_version_1", idData) - require.NoError(t, err) - - var out []byte - err = scale.Unmarshal(res, &out) - require.NoError(t, err) - - var ret [][32]byte - err = scale.Unmarshal(out, &ret) - require.NoError(t, err) - - sort.Slice(ret, func(i int, j int) bool { return ret[i][0] < ret[j][0] }) - require.Equal(t, pubKeys, ret) -} - -func Test_ext_crypto_sr25519_generate_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - idData := []byte(keystore.AccoName) - ks, _ := ctx.Keystore.GetKeystore(idData) - require.Equal(t, 0, ks.Size()) - - mnemonic, err := crypto.NewBIP39Mnemonic() - require.NoError(t, err) - - mnemonicBytes := []byte(mnemonic) - var data = &mnemonicBytes - seedData, err := scale.Marshal(data) - require.NoError(t, err) - - params := append(idData, seedData...) 
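
The sr25519 tests above and below all exercise the same keypair round trip; a condensed, hedged sketch of it, using only calls that appear in the removed test code:

```go
package main

import (
	"fmt"

	"github.com/ChainSafe/gossamer/lib/crypto/sr25519"
)

func main() {
	// generate a keypair, sign a message, then verify the signature,
	// mirroring the flow of the removed sign/verify tests
	kp, err := sr25519.GenerateKeypair()
	if err != nil {
		panic(err)
	}

	msg := []byte("Hello world!")

	sig, err := kp.Private().Sign(msg)
	if err != nil {
		panic(err)
	}

	ok, err := kp.Public().Verify(msg, sig)
	if err != nil {
		panic(err)
	}

	fmt.Println("signature valid:", ok) // expected: true
}
```
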
- - ret, err := inst.Exec("rtm_ext_crypto_sr25519_generate_version_1", params) - require.NoError(t, err) - - var out []byte - err = scale.Unmarshal(ret, &out) - require.NoError(t, err) - - pubKey, err := ed25519.NewPublicKey(out) - require.NoError(t, err) - require.Equal(t, 1, ks.Size()) - - kp := ks.GetKeypair(pubKey) - require.NotNil(t, kp) -} - -func Test_ext_crypto_sr25519_sign_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - kp, err := sr25519.GenerateKeypair() - require.NoError(t, err) - - idData := []byte(keystore.AccoName) - ks, _ := ctx.Keystore.GetKeystore(idData) - require.Equal(t, 0, ks.Size()) - - ks.Insert(kp) - - pubKeyData := kp.Public().Encode() - encPubKey, err := scale.Marshal(pubKeyData) - require.NoError(t, err) - - msgData := []byte("Hello world!") - encMsg, err := scale.Marshal(msgData) - require.NoError(t, err) - - res, err := inst.Exec("rtm_ext_crypto_sr25519_sign_version_1", append(append(idData, encPubKey...), encMsg...)) - require.NoError(t, err) - - var out []byte - err = scale.Unmarshal(res, &out) - require.NoError(t, err) - - var val *[64]byte - err = scale.Unmarshal(out, &val) - require.NoError(t, err) - require.NotNil(t, val) - - value := make([]byte, 64) - copy(value[:], val[:]) - - ok, err := kp.Public().Verify(msgData, value) - require.NoError(t, err) - require.True(t, ok) -} - -func Test_ext_crypto_sr25519_verify_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - kp, err := sr25519.GenerateKeypair() - require.NoError(t, err) - - idData := []byte(keystore.AccoName) - ks, _ := ctx.Keystore.GetKeystore(idData) - require.Equal(t, 0, ks.Size()) - - pubKeyData := kp.Public().Encode() - encPubKey, err := scale.Marshal(pubKeyData) - require.NoError(t, err) - - msgData := []byte("Hello world!") - encMsg, err := scale.Marshal(msgData) - require.NoError(t, err) - - sign, err := kp.Private().Sign(msgData) - require.NoError(t, err) - encSign, err := scale.Marshal(sign) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_crypto_sr25519_verify_version_1", append(append(encSign, encMsg...), encPubKey...)) - require.NoError(t, err) - - var read *[]byte - err = scale.Unmarshal(ret, &read) - require.NoError(t, err) - require.NotNil(t, read) -} - -func Test_ext_crypto_secp256k1_ecdsa_recover_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - msgData := []byte("Hello world!") - blakeHash, err := common.Blake2bHash(msgData) - require.NoError(t, err) - - kp, err := secp256k1.GenerateKeypair() - require.NoError(t, err) - - sigData, err := kp.Private().Sign(blakeHash.ToBytes()) - require.NoError(t, err) - - expectedPubKey := kp.Public().Encode() - - encSign, err := scale.Marshal(sigData) - require.NoError(t, err) - encMsg, err := scale.Marshal(blakeHash.ToBytes()) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_crypto_secp256k1_ecdsa_recover_version_1", append(encSign, encMsg...)) - require.NoError(t, err) - - var out []byte - err = scale.Unmarshal(ret, &out) - require.NoError(t, err) - - buf := &bytes.Buffer{} - buf.Write(out) - - uncomPubKey, err := new(types.Result).Decode(buf) - require.NoError(t, err) - rawPub := uncomPubKey.Value() - require.Equal(t, 64, len(rawPub)) - - publicKey := new(secp256k1.PublicKey) - - // Generates [33]byte compressed key from uncompressed [65]byte public key. 
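
An aside on the key formats the comment above refers to: an uncompressed SEC1 public key is `0x04 || X || Y` (65 bytes on a 256-bit curve), while the compressed form is `0x02`/`0x03` `|| X` (33 bytes), which is why the test prepends `4` to the 64 recovered bytes. secp256k1 is not in the Go standard library, so this hedged sketch uses P-256 purely to show the shared SEC1 encoding:

```go
package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P256()
	_, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}

	uncompressed := elliptic.Marshal(curve, x, y)         // 0x04 prefix, 65 bytes
	compressed := elliptic.MarshalCompressed(curve, x, y) // 0x02 or 0x03 prefix, 33 bytes

	fmt.Printf("uncompressed: %d bytes, prefix 0x%02x\n", len(uncompressed), uncompressed[0])
	fmt.Printf("compressed:   %d bytes, prefix 0x%02x\n", len(compressed), compressed[0])
}
```
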
- err = publicKey.UnmarshalPubkey(append([]byte{4}, rawPub...)) - require.NoError(t, err) - require.Equal(t, expectedPubKey, publicKey.Encode()) -} - -func Test_ext_hashing_keccak_256_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - data := []byte("helloworld") - enc, err := scale.Marshal(data) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_hashing_keccak_256_version_1", enc) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - expected, err := common.Keccak256(data) - require.NoError(t, err) - require.Equal(t, expected[:], hash) -} - -func Test_ext_hashing_sha2_256_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - data := []byte("helloworld") - enc, err := scale.Marshal(data) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_hashing_sha2_256_version_1", enc) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - expected := common.Sha256(data) - require.Equal(t, expected[:], hash) -} - -func Test_ext_hashing_blake2_128_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - data := []byte("helloworld") - enc, err := scale.Marshal(data) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_hashing_blake2_128_version_1", enc) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - expected, err := common.Blake2b128(data) - require.NoError(t, err) - require.Equal(t, expected[:], hash) -} - -func Test_ext_hashing_twox_256_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - data := []byte("helloworld") - enc, err := scale.Marshal(data) - require.NoError(t, err) - - ret, err := inst.Exec("rtm_ext_hashing_twox_256_version_1", enc) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(ret, &hash) - require.NoError(t, err) - - expected, err := common.Twox256(data) - require.NoError(t, err) - require.Equal(t, expected[:], hash) -} - -func Test_ext_trie_blake2_256_root_version_1(t *testing.T) { - inst := newTestInstance(t, runtime.HOST_API_TEST_RUNTIME) - - testinput := []string{"noot", "was", "here", "??"} - encInput, err := scale.Marshal(testinput) - require.NoError(t, err) - encInput[0] = encInput[0] >> 1 - - res, err := inst.Exec("rtm_ext_trie_blake2_256_root_version_1", encInput) - require.NoError(t, err) - - var hash []byte - err = scale.Unmarshal(res, &hash) - require.NoError(t, err) - - tt := trie.NewEmptyTrie() - tt.Put([]byte("noot"), []byte("was")) - tt.Put([]byte("here"), []byte("??")) - - expected := tt.MustHash() - require.Equal(t, expected[:], hash) -} diff --git a/lib/runtime/life/test_helpers.go b/lib/runtime/life/test_helpers.go deleted file mode 100644 index 2ed1340b63..0000000000 --- a/lib/runtime/life/test_helpers.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package life - -import ( - "context" - "testing" - - "github.com/ChainSafe/gossamer/internal/log" - "github.com/ChainSafe/gossamer/lib/keystore" - "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/trie" - "github.com/stretchr/testify/require" -) - -// DefaultTestLogLvl is the log level used for test runtime instances -var DefaultTestLogLvl = log.Info - -// newTestInstance will create a new runtime 
instance using the given target runtime
-func newTestInstance(t *testing.T, targetRuntime string) *Instance {
-	return newTestInstanceWithTrie(t, targetRuntime, nil, DefaultTestLogLvl)
-}
-
-// newTestInstanceWithTrie will create a new runtime instance with the supplied trie as the storage
-func newTestInstanceWithTrie(t *testing.T, targetRuntime string, tt *trie.Trie, lvl log.Level) *Instance {
-	testRuntimeFilePath, err := runtime.GetRuntime(context.Background(), targetRuntime)
-	require.NoError(t, err)
-	cfg := setupConfig(t, tt, lvl, 0)
-	r, err := NewInstanceFromFile(testRuntimeFilePath, cfg)
-	require.NoError(t, err, "Got error when trying to create new VM", "targetRuntime", targetRuntime)
-	require.NotNil(t, r, "Could not create new VM instance", "targetRuntime", targetRuntime)
-	return r
-}
-
-func setupConfig(t *testing.T, tt *trie.Trie, lvl log.Level, role byte) *Config {
-	s, err := storage.NewTrieState(tt)
-	require.NoError(t, err)
-
-	ns := runtime.NodeStorage{
-		LocalStorage:      runtime.NewInMemoryDB(t),
-		PersistentStorage: runtime.NewInMemoryDB(t), // we're using a local storage here since this is a test runtime
-	}
-	cfg := &Config{}
-	cfg.Storage = s
-	cfg.Keystore = keystore.NewGlobalKeystore()
-	cfg.LogLvl = lvl
-	cfg.NodeStorage = ns
-	cfg.Network = new(runtime.TestRuntimeNetwork)
-	cfg.Role = role
-	cfg.Resolver = new(Resolver)
-	return cfg
-}

From a0a1804b8572e1eeba8faceccc3254409786ab46 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?=
Date: Wed, 6 Jul 2022 13:23:49 -0400
Subject: [PATCH 37/48] fix(dot/state/epoch, lib/babe): enable block production
 through epochs without relying on finalization (#2593)

* fix: block production to be independent of finalized blocks

Co-authored-by: Timothy Wu
---
 dot/state/epoch.go          | 186 +++++++++++++++---------------------
 dot/state/epoch_test.go     |   3 -
 lib/babe/epoch.go           |  81 ++++++----------
 lib/babe/epoch_test.go      | 115 ++++++++++------------
 lib/babe/errors.go          |   2 -
 lib/babe/mock_state_test.go |  30 ------
 lib/babe/state.go           |   3 -
 lib/babe/verify.go          |  17 +---
 lib/babe/verify_test.go     |  68 +------------
 9 files changed, 164 insertions(+), 341 deletions(-)

diff --git a/dot/state/epoch.go b/dot/state/epoch.go
index d8f02ed2c7..0b5ab5db83 100644
--- a/dot/state/epoch.go
+++ b/dot/state/epoch.go
@@ -12,14 +12,16 @@ import (
 
 	"github.com/ChainSafe/chaindb"
 	"github.com/ChainSafe/gossamer/dot/types"
+	"github.com/ChainSafe/gossamer/lib/blocktree"
 	"github.com/ChainSafe/gossamer/lib/common"
 	"github.com/ChainSafe/gossamer/pkg/scale"
 )
 
 var (
+	ErrConfigNotFound     = errors.New("config data not found")
 	ErrEpochNotInMemory   = errors.New("epoch not found in memory map")
 	errHashNotInMemory    = errors.New("hash not found in memory map")
-	errEpochDataNotFound  = errors.New("epoch data not found in the database")
+	errEpochNotInDatabase = errors.New("epoch data not found in the database")
 	errHashNotPersisted   = errors.New("hash with next epoch not found in database")
 	errNoPreRuntimeDigest = errors.New("header does not contain pre-runtime digest")
 )
@@ -58,11 +60,11 @@ type EpochState struct {
 
 	nextEpochDataLock sync.RWMutex
 	// nextEpochData follows the format map[epoch]map[block hash]next epoch data
-	nextEpochData map[uint64]map[common.Hash]types.NextEpochData
+	nextEpochData nextEpochMap[types.NextEpochData]
 
 	nextConfigDataLock sync.RWMutex
 	// nextConfigData follows the format map[epoch]map[block hash]next config data
-	nextConfigData map[uint64]map[common.Hash]types.NextConfigData
+	nextConfigData nextEpochMap[types.NextConfigData]
 }
 
 // 
NewEpochStateFromGenesis returns a new EpochState given information for the first epoch, fetched from the runtime @@ -90,8 +92,8 @@ func NewEpochStateFromGenesis(db chaindb.Database, blockState *BlockState, blockState: blockState, db: epochDB, epochLength: genesisConfig.EpochLength, - nextEpochData: make(map[uint64]map[common.Hash]types.NextEpochData), - nextConfigData: make(map[uint64]map[common.Hash]types.NextConfigData), + nextEpochData: make(nextEpochMap[types.NextEpochData]), + nextConfigData: make(nextEpochMap[types.NextConfigData]), } auths, err := types.BABEAuthorityRawToAuthority(genesisConfig.GenesisAuthorities) @@ -151,8 +153,8 @@ func NewEpochState(db chaindb.Database, blockState *BlockState) (*EpochState, er db: chaindb.NewTable(db, epochPrefix), epochLength: epochLength, skipToEpoch: skipToEpoch, - nextEpochData: make(map[uint64]map[common.Hash]types.NextEpochData), - nextConfigData: make(map[uint64]map[common.Hash]types.NextConfigData), + nextEpochData: make(nextEpochMap[types.NextEpochData]), + nextConfigData: make(nextEpochMap[types.NextConfigData]), }, nil } @@ -247,25 +249,29 @@ func (s *EpochState) SetEpochData(epoch uint64, info *types.EpochData) error { // if the header params is nil then it will search only in database func (s *EpochState) GetEpochData(epoch uint64, header *types.Header) (*types.EpochData, error) { epochData, err := s.getEpochDataFromDatabase(epoch) - if err == nil && epochData != nil { + if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + return nil, fmt.Errorf("failed to retrieve epoch data from database: %w", err) + } + + if epochData != nil { return epochData, nil } - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { - return nil, fmt.Errorf("failed to get epoch data from database: %w", err) + if header == nil { + return nil, errEpochNotInDatabase } - // lookup in-memory only if header is given - if header != nil && errors.Is(err, chaindb.ErrKeyNotFound) { - epochData, err = s.getEpochDataFromMemory(epoch, header) - if err != nil { - return nil, fmt.Errorf("failed to get epoch data from memory: %w", err) - } + s.nextEpochDataLock.RLock() + defer s.nextEpochDataLock.RUnlock() + + inMemoryEpochData, err := s.nextEpochData.Retrieve(s.blockState, epoch, header) + if err != nil { + return nil, fmt.Errorf("failed to get epoch data from memory: %w", err) } - if epochData == nil { - return nil, fmt.Errorf("%w: for epoch %d and header with hash %s", - errEpochDataNotFound, epoch, header.Hash()) + epochData, err = inMemoryEpochData.ToEpochData() + if err != nil { + return nil, fmt.Errorf("cannot transform into epoch data: %w", err) } return epochData, nil @@ -287,32 +293,6 @@ func (s *EpochState) getEpochDataFromDatabase(epoch uint64) (*types.EpochData, e return raw.ToEpochData() } -// getEpochDataFromMemory retrieves the right epoch data that belongs to the header parameter -func (s *EpochState) getEpochDataFromMemory(epoch uint64, header *types.Header) (*types.EpochData, error) { - s.nextEpochDataLock.RLock() - defer s.nextEpochDataLock.RUnlock() - - atEpoch, has := s.nextEpochData[epoch] - if !has { - return nil, fmt.Errorf("%w: %d", ErrEpochNotInMemory, epoch) - } - - headerHash := header.Hash() - - for hash, value := range atEpoch { - isDescendant, err := s.blockState.IsDescendantOf(hash, headerHash) - if err != nil { - return nil, fmt.Errorf("cannot verify the ancestry: %w", err) - } - - if isDescendant { - return value.ToEpochData() - } - } - - return nil, fmt.Errorf("%w: %s", errHashNotInMemory, headerHash) -} - // 
GetLatestEpochData returns the EpochData for the current epoch
 func (s *EpochState) GetLatestEpochData() (*types.EpochData, error) {
 	curr, err := s.GetCurrentEpoch()
@@ -323,26 +303,6 @@ func (s *EpochState) GetLatestEpochData() (*types.EpochData, error) {
 	return s.GetEpochData(curr, nil)
 }
 
-// HasEpochData returns whether epoch data exists for a given epoch
-func (s *EpochState) HasEpochData(epoch uint64) (bool, error) {
-	has, err := s.db.Has(epochDataKey(epoch))
-	if err == nil && has {
-		return has, nil
-	}
-
-	// we can have `has == false` and `err == nil`
-	// so ensure the error is not nil in the condition below.
-	if err != nil && !errors.Is(chaindb.ErrKeyNotFound, err) {
-		return false, fmt.Errorf("cannot check database for epoch key %d: %w", epoch, err)
-	}
-
-	s.nextEpochDataLock.Lock()
-	defer s.nextEpochDataLock.Unlock()
-
-	_, has = s.nextEpochData[epoch]
-	return has, nil
-}
-
 // SetConfigData sets the BABE config data for a given epoch
 func (s *EpochState) SetConfigData(epoch uint64, info *types.ConfigData) error {
 	enc, err := scale.Marshal(*info)
@@ -364,28 +324,44 @@ func (s *EpochState) setLatestConfigData(epoch uint64) error {
 	return s.db.Put(latestConfigDataKey, buf)
 }
 
-// GetConfigData returns the config data for a given epoch persisted in database
-// otherwise tries to get the data from the in-memory map using the header.
-// If the header params is nil then it will search only in the database
-func (s *EpochState) GetConfigData(epoch uint64, header *types.Header) (*types.ConfigData, error) {
-	configData, err := s.getConfigDataFromDatabase(epoch)
-	if err == nil && configData != nil {
-		return configData, nil
-	}
+// GetConfigData returns the newest config data for a given epoch persisted in the database,
+// otherwise it tries to get the data from the in-memory map using the header. If we don't
+// find any config data for the current epoch we look up the previous epochs, as the spec says:
+// - The supplied configuration data are intended to be used from the next epoch onwards.
+// If the header param is nil then it will search only in the database. 
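
Before the implementation that follows, a minimal stand-in sketch of the backwards search this comment describes (hypothetical types and a plain in-memory map replace the real database and header lookup; only the epoch-walking logic is illustrated):

```go
package main

import "fmt"

// ConfigData is a stand-in for types.ConfigData.
type ConfigData struct{ C1, C2 uint64 }

// getConfigData walks from the requested epoch back towards epoch 0 and
// returns the first config data found, since config data set at some epoch
// stays in effect until it is replaced.
func getConfigData(byEpoch map[uint64]ConfigData, epoch uint64) (ConfigData, error) {
	for tryEpoch := int(epoch); tryEpoch >= 0; tryEpoch-- {
		if cfg, ok := byEpoch[uint64(tryEpoch)]; ok {
			return cfg, nil
		}
	}
	return ConfigData{}, fmt.Errorf("config data not found up to epoch %d", epoch)
}

func main() {
	byEpoch := map[uint64]ConfigData{0: {C1: 1, C2: 4}} // only the genesis config was ever set
	cfg, err := getConfigData(byEpoch, 5)               // epochs 1..5 reuse the epoch 0 data
	fmt.Println(cfg, err)
}
```
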
+func (s *EpochState) GetConfigData(epoch uint64, header *types.Header) (configData *types.ConfigData, err error) {
+	for tryEpoch := int(epoch); tryEpoch >= 0; tryEpoch-- {
+		configData, err = s.getConfigDataFromDatabase(uint64(tryEpoch))
+		if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) {
+			return nil, fmt.Errorf("failed to retrieve config data from database: %w", err)
+		}
 
-	if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) {
-		return nil, fmt.Errorf("failed to get config data from database: %w", err)
-	} else if header == nil {
-		// if no header is given then skip the lookup in-memory
-		return configData, nil
-	}
+		if configData != nil {
+			return configData, nil
+		}
 
-	configData, err = s.getConfigDataFromMemory(epoch, header)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get config data from memory: %w", err)
+		// there is no config data for `tryEpoch` in the database and we don't
+		// have a header to look up in the memory map, so move on to the previous epoch
+		if header == nil {
+			continue
+		}
+
+		// check the in-memory map; if the data is not found there either,
+		// keep searching through the previous epochs
+		s.nextConfigDataLock.RLock()
+		inMemoryConfigData, err := s.nextConfigData.Retrieve(s.blockState, uint64(tryEpoch), header)
+		s.nextConfigDataLock.RUnlock()
+
+		if errors.Is(err, ErrEpochNotInMemory) {
+			continue
+		} else if err != nil {
+			return nil, fmt.Errorf("failed to get config data from memory: %w", err)
+		}
+
+		return inMemoryConfigData.ToConfigData(), err
 	}
 
-	return configData, nil
+	return nil, fmt.Errorf("%w: epoch %d", ErrConfigNotFound, epoch)
 }
 
 // getConfigDataFromDatabase returns the BABE config data for a given epoch persisted in database
@@ -404,26 +380,36 @@ func (s *EpochState) getConfigDataFromDatabase(epoch uint64) (*types.ConfigData,
 	return info, nil
 }
 
-// getConfigDataFromMemory retrieves the BABE config data for a given epoch that belongs to the header parameter
-func (s *EpochState) getConfigDataFromMemory(epoch uint64, header *types.Header) (*types.ConfigData, error) {
-	s.nextConfigDataLock.RLock()
-	defer s.nextConfigDataLock.RUnlock()
+type nextEpochMap[T types.NextEpochData | types.NextConfigData] map[uint64]map[common.Hash]T
 
-	atEpoch, has := s.nextConfigData[epoch]
+func (nem nextEpochMap[T]) Retrieve(blockState *BlockState, epoch uint64, header *types.Header) (*T, error) {
+	atEpoch, has := nem[epoch]
 	if !has {
 		return nil, fmt.Errorf("%w: %d", ErrEpochNotInMemory, epoch)
 	}
 
 	headerHash := header.Hash()
 
 	for hash, value := range atEpoch {
-		isDescendant, err := s.blockState.IsDescendantOf(hash, headerHash)
+		isDescendant, err := blockState.IsDescendantOf(hash, headerHash)
+
+		// while moving to the next epoch it is possible the header is not yet
+		// fully imported by the blocktree; in this case we use its parent
+		// header, which might already be imported. 
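+		// note: the fallback below retries with the parent header, and the
+		// recursion walks back through ancestors until one that the
+		// blocktree knows about is reached.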
+ if errors.Is(err, blocktree.ErrEndNodeNotFound) { + parentHeader, err := blockState.GetHeader(header.ParentHash) + if err != nil { + return nil, fmt.Errorf("cannot get parent header: %w", err) + } + + return nem.Retrieve(blockState, epoch, parentHeader) + } + if err != nil { return nil, fmt.Errorf("cannot verify the ancestry: %w", err) } if isDescendant { - return value.ToConfigData(), nil + return &value, nil } } @@ -441,24 +427,6 @@ func (s *EpochState) GetLatestConfigData() (*types.ConfigData, error) { return s.GetConfigData(epoch, nil) } -// HasConfigData returns whether config data exists for a given epoch -func (s *EpochState) HasConfigData(epoch uint64) (bool, error) { - has, err := s.db.Has(configDataKey(epoch)) - if err == nil && has { - return has, nil - } - - if err != nil && !errors.Is(chaindb.ErrKeyNotFound, err) { - return false, fmt.Errorf("cannot check database for epoch key %d: %w", epoch, err) - } - - s.nextConfigDataLock.Lock() - defer s.nextConfigDataLock.Unlock() - - _, has = s.nextConfigData[epoch] - return has, nil -} - // GetStartSlotForEpoch returns the first slot in the given epoch. // If 0 is passed as the epoch, it returns the start slot for the current epoch. func (s *EpochState) GetStartSlotForEpoch(epoch uint64) (uint64, error) { diff --git a/dot/state/epoch_test.go b/dot/state/epoch_test.go index 234e22ca55..75b4736560 100644 --- a/dot/state/epoch_test.go +++ b/dot/state/epoch_test.go @@ -54,9 +54,6 @@ func TestEpochState_CurrentEpoch(t *testing.T) { func TestEpochState_EpochData(t *testing.T) { s := newEpochStateFromGenesis(t) - has, err := s.HasEpochData(0) - require.NoError(t, err) - require.True(t, has) keyring, err := keystore.NewSr25519Keyring() require.NoError(t, err) diff --git a/lib/babe/epoch.go b/lib/babe/epoch.go index 1e524094fb..7cf51873be 100644 --- a/lib/babe/epoch.go +++ b/lib/babe/epoch.go @@ -24,15 +24,25 @@ func (b *Service) initiateEpoch(epoch uint64) (*epochData, error) { } } - epochData, startSlot, err := b.getEpochDataAndStartSlot(epoch) + bestBlockHeader, err := b.blockState.BestBlockHeader() + if err != nil { + return nil, fmt.Errorf("cannot get the best block header: %w", err) + } + + epochData, err := b.getEpochData(epoch, bestBlockHeader) if err != nil { return nil, fmt.Errorf("cannot get epoch data and start slot: %w", err) } + startSlot, err := b.epochState.GetStartSlotForEpoch(epoch) + if err != nil { + return nil, fmt.Errorf("cannot get start slot for epoch %d: %w", epoch, err) + } + // if we're at genesis, we need to determine when the first slot of the network will be // by checking when we will be able to produce block 1. 
// note that this assumes there will only be one producer of block 1 - if b.blockState.BestBlockHash() == b.blockState.GenesisHash() { + if bestBlockHeader.Hash() == b.blockState.GenesisHash() { startSlot, err = b.getFirstAuthoringSlot(epoch, epochData) if err != nil { return nil, fmt.Errorf("cannot get first authoring slot: %w", err) @@ -75,78 +85,43 @@ func (b *Service) checkAndSetFirstSlot() error { return nil } -func (b *Service) getEpochDataAndStartSlot(epoch uint64) (*epochData, uint64, error) { +func (b *Service) getEpochData(epoch uint64, bestBlock *types.Header) (*epochData, error) { if epoch == 0 { - startSlot, err := b.epochState.GetStartSlotForEpoch(epoch) - if err != nil { - return nil, 0, fmt.Errorf("cannot get start slot for epoch %d: %w", epoch, err) - } - epochData, err := b.getLatestEpochData() if err != nil { - return nil, 0, fmt.Errorf("cannot get latest epoch data: %w", err) + return nil, fmt.Errorf("cannot get latest epoch data: %w", err) } - return epochData, startSlot, nil - } - - has, err := b.epochState.HasEpochData(epoch) - if err != nil { - return nil, 0, fmt.Errorf("cannot check epoch state: %w", err) - } - - if !has { - logger.Criticalf("%s number=%d", errNoEpochData, epoch) - return nil, 0, fmt.Errorf("%w: for epoch %d", errNoEpochData, epoch) + return epochData, nil } - data, err := b.epochState.GetEpochData(epoch, nil) + currEpochData, err := b.epochState.GetEpochData(epoch, bestBlock) if err != nil { - return nil, 0, fmt.Errorf("cannot get epoch data for epoch %d: %w", epoch, err) + return nil, fmt.Errorf("cannot get epoch data for epoch %d: %w", epoch, err) } - idx, err := b.getAuthorityIndex(data.Authorities) + currentConfigData, err := b.epochState.GetConfigData(epoch, bestBlock) if err != nil { - return nil, 0, fmt.Errorf("cannot get authority index: %w", err) + return nil, fmt.Errorf("cannot get config data for epoch %d: %w", epoch, err) } - has, err = b.epochState.HasConfigData(epoch) + threshold, err := CalculateThreshold(currentConfigData.C1, currentConfigData.C2, len(currEpochData.Authorities)) if err != nil { - return nil, 0, fmt.Errorf("cannot check for config data for epoch %d: %w", epoch, err) - } - - var cfgData *types.ConfigData - if has { - cfgData, err = b.epochState.GetConfigData(epoch, nil) - if err != nil { - return nil, 0, fmt.Errorf("cannot get config data for epoch %d: %w", epoch, err) - } - } else { - cfgData, err = b.epochState.GetLatestConfigData() - if err != nil { - return nil, 0, fmt.Errorf("cannot get latest config data from epoch state: %w", err) - } + return nil, fmt.Errorf("cannot calculate threshold: %w", err) } - threshold, err := CalculateThreshold(cfgData.C1, cfgData.C2, len(data.Authorities)) + idx, err := b.getAuthorityIndex(currEpochData.Authorities) if err != nil { - return nil, 0, fmt.Errorf("cannot calculate threshold: %w", err) + return nil, fmt.Errorf("cannot get authority index: %w", err) } - ed := &epochData{ - randomness: data.Randomness, - authorities: data.Authorities, + return &epochData{ + randomness: currEpochData.Randomness, + authorities: currEpochData.Authorities, authorityIndex: idx, threshold: threshold, - allowedSlots: types.AllowedSlots(cfgData.SecondarySlots), - } - - startSlot, err := b.epochState.GetStartSlotForEpoch(epoch) - if err != nil { - return nil, 0, fmt.Errorf("cannot get start slot for epoch %d: %w", epoch, err) - } - - return ed, startSlot, nil + allowedSlots: types.AllowedSlots(currentConfigData.SecondarySlots), + }, nil } func (b *Service) getLatestEpochData() (resEpochData 
*epochData, error error) { diff --git a/lib/babe/epoch_test.go b/lib/babe/epoch_test.go index 0e4d62ccfe..1580dea7eb 100644 --- a/lib/babe/epoch_test.go +++ b/lib/babe/epoch_test.go @@ -71,19 +71,6 @@ func TestBabeService_checkAndSetFirstSlot(t *testing.T) { } func TestBabeService_getEpochDataAndStartSlot(t *testing.T) { - ctrl := gomock.NewController(t) - mockBlockState := NewMockBlockState(ctrl) - mockEpochState0 := NewMockEpochState(ctrl) - mockEpochState1 := NewMockEpochState(ctrl) - mockEpochState2 := NewMockEpochState(ctrl) - - mockEpochState0.EXPECT().GetStartSlotForEpoch(uint64(0)).Return(uint64(1), nil) - mockEpochState1.EXPECT().GetStartSlotForEpoch(uint64(1)).Return(uint64(201), nil) - mockEpochState2.EXPECT().GetStartSlotForEpoch(uint64(1)).Return(uint64(201), nil) - - mockEpochState1.EXPECT().HasEpochData(uint64(1)).Return(true, nil) - mockEpochState2.EXPECT().HasEpochData(uint64(1)).Return(true, nil) - kp := keyring.Alice().(*sr25519.Keypair) authority := types.NewAuthority(kp.Public(), uint64(1)) testEpochData := &types.EpochData{ @@ -91,55 +78,21 @@ func TestBabeService_getEpochDataAndStartSlot(t *testing.T) { Authorities: []types.Authority{*authority}, } - mockEpochState1.EXPECT().GetEpochData(uint64(1), nil).Return(testEpochData, nil) - mockEpochState2.EXPECT().GetEpochData(uint64(1), nil).Return(testEpochData, nil) - - mockEpochState1.EXPECT().HasConfigData(uint64(1)).Return(true, nil) - mockEpochState2.EXPECT().HasConfigData(uint64(1)).Return(false, nil) - testConfigData := &types.ConfigData{ C1: 1, C2: 1, } - mockEpochState1.EXPECT().GetConfigData(uint64(1), nil).Return(testConfigData, nil) - testLatestConfigData := &types.ConfigData{ C1: 1, C2: 2, } - mockEpochState2.EXPECT().GetLatestConfigData().Return(testLatestConfigData, nil) - testEpochDataEpoch0 := &types.EpochData{ Randomness: [32]byte{9}, Authorities: []types.Authority{*authority}, } - mockEpochState0.EXPECT().GetLatestEpochData().Return(testEpochDataEpoch0, nil) - mockEpochState0.EXPECT().GetLatestConfigData().Return(testConfigData, nil) - - bs0 := &Service{ - authority: true, - keypair: kp, - epochState: mockEpochState0, - blockState: mockBlockState, - } - - bs1 := &Service{ - authority: true, - keypair: kp, - epochState: mockEpochState1, - blockState: mockBlockState, - } - - bs2 := &Service{ - authority: true, - keypair: kp, - epochState: mockEpochState2, - blockState: mockBlockState, - } - threshold0, err := CalculateThreshold(testConfigData.C1, testConfigData.C2, 1) require.NoError(t, err) @@ -147,16 +100,27 @@ func TestBabeService_getEpochDataAndStartSlot(t *testing.T) { require.NoError(t, err) cases := []struct { + service func(*gomock.Controller) *Service name string - service *Service epoch uint64 expected *epochData expectedStartSlot uint64 }{ { - name: "should get epoch data for epoch 0", - service: bs0, - epoch: 0, + name: "should get epoch data for epoch 0", + service: func(ctrl *gomock.Controller) *Service { + mockEpochState := NewMockEpochState(ctrl) + + mockEpochState.EXPECT().GetLatestEpochData().Return(testEpochDataEpoch0, nil) + mockEpochState.EXPECT().GetLatestConfigData().Return(testConfigData, nil) + + return &Service{ + authority: true, + keypair: kp, + epochState: mockEpochState, + } + }, + epoch: 0, expected: &epochData{ randomness: testEpochDataEpoch0.Randomness, authorities: testEpochDataEpoch0.Authorities, @@ -166,9 +130,20 @@ func TestBabeService_getEpochDataAndStartSlot(t *testing.T) { expectedStartSlot: 1, }, { - name: "should get epoch data for epoch 1 with config data from 
epoch 1", - service: bs1, - epoch: 1, + name: "should get epoch data for epoch 1 with config data from epoch 1", + service: func(ctrl *gomock.Controller) *Service { + mockEpochState := NewMockEpochState(ctrl) + + mockEpochState.EXPECT().GetEpochData(uint64(1), nil).Return(testEpochData, nil) + mockEpochState.EXPECT().GetConfigData(uint64(1), nil).Return(testConfigData, nil) + + return &Service{ + authority: true, + keypair: kp, + epochState: mockEpochState, + } + }, + epoch: 1, expected: &epochData{ randomness: testEpochData.Randomness, authorities: testEpochData.Authorities, @@ -178,9 +153,20 @@ func TestBabeService_getEpochDataAndStartSlot(t *testing.T) { expectedStartSlot: 201, }, { - name: "should get epoch data for epoch 1 and config data for epoch 0", - service: bs2, - epoch: 1, + name: "should get epoch data for epoch 1 and config data for epoch 0", + service: func(ctrl *gomock.Controller) *Service { + mockEpochState := NewMockEpochState(ctrl) + + mockEpochState.EXPECT().GetEpochData(uint64(1), nil).Return(testEpochData, nil) + mockEpochState.EXPECT().GetConfigData(uint64(1), nil).Return(testLatestConfigData, nil) + + return &Service{ + authority: true, + keypair: kp, + epochState: mockEpochState, + } + }, + epoch: 1, expected: &epochData{ randomness: testEpochData.Randomness, authorities: testEpochData.Authorities, @@ -191,10 +177,15 @@ func TestBabeService_getEpochDataAndStartSlot(t *testing.T) { }, } - for _, tc := range cases { - res, startSlot, err := tc.service.getEpochDataAndStartSlot(tc.epoch) - require.NoError(t, err) - require.Equal(t, tc.expected, res) - require.Equal(t, tc.expectedStartSlot, startSlot) + for _, tt := range cases { + tt := tt + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + service := tt.service(ctrl) + + res, err := service.getEpochData(tt.epoch, nil) + require.NoError(t, err) + require.Equal(t, tt.expected, res) + }) } } diff --git a/lib/babe/errors.go b/lib/babe/errors.go index 527a0a8915..b851f2c05a 100644 --- a/lib/babe/errors.go +++ b/lib/babe/errors.go @@ -69,12 +69,10 @@ var ( errNilStorageState = errors.New("storage state is nil") errNilParentHeader = errors.New("parent header is nil") errInvalidResult = errors.New("invalid error value") - errNoEpochData = errors.New("no epoch data found for upcoming epoch") errFirstBlockTimeout = errors.New("timed out waiting for first block") errChannelClosed = errors.New("block notifier channel was closed") errOverPrimarySlotThreshold = errors.New("cannot claim slot, over primary threshold") errNotOurTurnToPropose = errors.New("cannot claim slot, not our turn to propose a block") - errNoConfigData = errors.New("cannot find ConfigData for epoch") errGetEpochData = errors.New("get epochData error") errFailedFinalisation = errors.New("failed to check finalisation") errMissingDigest = errors.New("chain head missing digest") diff --git a/lib/babe/mock_state_test.go b/lib/babe/mock_state_test.go index f5bdfd2d91..38815094a9 100644 --- a/lib/babe/mock_state_test.go +++ b/lib/babe/mock_state_test.go @@ -678,36 +678,6 @@ func (mr *MockEpochStateMockRecorder) GetStartSlotForEpoch(arg0 interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartSlotForEpoch", reflect.TypeOf((*MockEpochState)(nil).GetStartSlotForEpoch), arg0) } -// HasConfigData mocks base method. 
-func (m *MockEpochState) HasConfigData(arg0 uint64) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasConfigData", arg0) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HasConfigData indicates an expected call of HasConfigData. -func (mr *MockEpochStateMockRecorder) HasConfigData(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasConfigData", reflect.TypeOf((*MockEpochState)(nil).HasConfigData), arg0) -} - -// HasEpochData mocks base method. -func (m *MockEpochState) HasEpochData(arg0 uint64) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasEpochData", arg0) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HasEpochData indicates an expected call of HasEpochData. -func (mr *MockEpochStateMockRecorder) HasEpochData(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasEpochData", reflect.TypeOf((*MockEpochState)(nil).HasEpochData), arg0) -} - // SetCurrentEpoch mocks base method. func (m *MockEpochState) SetCurrentEpoch(arg0 uint64) error { m.ctrl.T.Helper() diff --git a/lib/babe/state.go b/lib/babe/state.go index 5b168e850a..897f06bd35 100644 --- a/lib/babe/state.go +++ b/lib/babe/state.go @@ -66,12 +66,9 @@ type EpochState interface { GetCurrentEpoch() (uint64, error) SetEpochData(uint64, *types.EpochData) error - HasEpochData(epoch uint64) (bool, error) - GetEpochData(epoch uint64, header *types.Header) (*types.EpochData, error) GetConfigData(epoch uint64, header *types.Header) (*types.ConfigData, error) - HasConfigData(epoch uint64) (bool, error) GetLatestConfigData() (*types.ConfigData, error) GetStartSlotForEpoch(epoch uint64) (uint64, error) GetEpochForBlock(header *types.Header) (uint64, error) diff --git a/lib/babe/verify.go b/lib/babe/verify.go index 671114ef2d..50b59c2211 100644 --- a/lib/babe/verify.go +++ b/lib/babe/verify.go @@ -202,7 +202,7 @@ func (v *VerificationManager) getVerifierInfo(epoch uint64, header *types.Header return nil, fmt.Errorf("failed to get epoch data for epoch %d: %w", epoch, err) } - configData, err := v.getConfigData(epoch, header) + configData, err := v.epochState.GetConfigData(epoch, header) if err != nil { return nil, fmt.Errorf("failed to get config data: %w", err) } @@ -220,21 +220,6 @@ func (v *VerificationManager) getVerifierInfo(epoch uint64, header *types.Header }, nil } -func (v *VerificationManager) getConfigData(epoch uint64, header *types.Header) (*types.ConfigData, error) { - for i := int(epoch); i >= 0; i-- { - has, err := v.epochState.HasConfigData(uint64(i)) - if err != nil { - return nil, err - } else if !has { - continue - } - - return v.epochState.GetConfigData(uint64(i), header) - } - - return nil, errNoConfigData -} - // verifier is a BABE verifier for a specific authority set, randomness, and threshold type verifier struct { blockState BlockState diff --git a/lib/babe/verify_test.go b/lib/babe/verify_test.go index f6c106ca7c..d8c8432ebf 100644 --- a/lib/babe/verify_test.go +++ b/lib/babe/verify_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" @@ -730,63 +731,6 @@ func Test_verifier_verifyAuthorshipRight(t *testing.T) { } } -func TestVerificationManager_getConfigData(t *testing.T) { - ctrl := gomock.NewController(t) - 
mockBlockState := NewMockBlockState(ctrl) - mockEpochStateEmpty := NewMockEpochState(ctrl) - mockEpochStateHasErr := NewMockEpochState(ctrl) - mockEpochStateGetErr := NewMockEpochState(ctrl) - - testHeader := types.NewEmptyHeader() - - mockEpochStateEmpty.EXPECT().HasConfigData(uint64(0)).Return(false, nil) - mockEpochStateHasErr.EXPECT().HasConfigData(uint64(0)).Return(false, errNoConfigData) - mockEpochStateGetErr.EXPECT().HasConfigData(uint64(0)).Return(true, nil) - mockEpochStateGetErr.EXPECT().GetConfigData(uint64(0), testHeader).Return(nil, errNoConfigData) - - vm0, err := NewVerificationManager(mockBlockState, mockEpochStateEmpty) - assert.NoError(t, err) - vm1, err := NewVerificationManager(mockBlockState, mockEpochStateHasErr) - assert.NoError(t, err) - vm2, err := NewVerificationManager(mockBlockState, mockEpochStateGetErr) - assert.NoError(t, err) - tests := []struct { - name string - vm *VerificationManager - epoch uint64 - exp *types.ConfigData - expErr error - }{ - { - name: "cant find ConfigData", - vm: vm0, - expErr: errNoConfigData, - }, - { - name: "hasConfigData error", - vm: vm1, - expErr: errNoConfigData, - }, - { - name: "getConfigData error", - vm: vm2, - expErr: errNoConfigData, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := tt.vm - res, err := v.getConfigData(tt.epoch, testHeader) - if tt.expErr != nil { - assert.EqualError(t, err, tt.expErr.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.exp, res) - }) - } -} - func TestVerificationManager_getVerifierInfo(t *testing.T) { ctrl := gomock.NewController(t) mockBlockState := NewMockBlockState(ctrl) @@ -797,13 +741,12 @@ func TestVerificationManager_getVerifierInfo(t *testing.T) { testHeader := types.NewEmptyHeader() - mockEpochStateGetErr.EXPECT().GetEpochData(uint64(0), testHeader).Return(nil, errNoConfigData) + mockEpochStateGetErr.EXPECT().GetEpochData(uint64(0), testHeader).Return(nil, state.ErrEpochNotInMemory) mockEpochStateHasErr.EXPECT().GetEpochData(uint64(0), testHeader).Return(&types.EpochData{}, nil) - mockEpochStateHasErr.EXPECT().HasConfigData(uint64(0)).Return(false, errNoConfigData) + mockEpochStateHasErr.EXPECT().GetConfigData(uint64(0), testHeader).Return(&types.ConfigData{}, state.ErrConfigNotFound) mockEpochStateThresholdErr.EXPECT().GetEpochData(uint64(0), testHeader).Return(&types.EpochData{}, nil) - mockEpochStateThresholdErr.EXPECT().HasConfigData(uint64(0)).Return(true, nil) mockEpochStateThresholdErr.EXPECT().GetConfigData(uint64(0), testHeader). Return(&types.ConfigData{ C1: 3, @@ -811,7 +754,6 @@ func TestVerificationManager_getVerifierInfo(t *testing.T) { }, nil) mockEpochStateOk.EXPECT().GetEpochData(uint64(0), testHeader).Return(&types.EpochData{}, nil) - mockEpochStateOk.EXPECT().HasConfigData(uint64(0)).Return(true, nil) mockEpochStateOk.EXPECT().GetConfigData(uint64(0), testHeader). 
Return(&types.ConfigData{
 			C1: 1,
@@ -837,12 +779,12 @@ func TestVerificationManager_getVerifierInfo(t *testing.T) {
 		{
 			name:   "getEpochData error",
 			vm:     vm0,
-			expErr: fmt.Errorf("failed to get epoch data for epoch %d: %w", 0, errNoConfigData),
+			expErr: fmt.Errorf("failed to get epoch data for epoch %d: %w", 0, state.ErrEpochNotInMemory),
 		},
 		{
 			name:   "getConfigData error",
 			vm:     vm1,
-			expErr: fmt.Errorf("failed to get config data: %w", errNoConfigData),
+			expErr: fmt.Errorf("failed to get config data: %w", state.ErrConfigNotFound),
 		},
 		{
 			name: "calculate threshold error",

From d2da7fbf7b7f3835ad3bcbe7e9ebe1d76d567de5 Mon Sep 17 00:00:00 2001
From: Quentin McGaw
Date: Wed, 6 Jul 2022 21:19:23 -0400
Subject: [PATCH 38/48] chore(trie): proof code refactoring and fixes (#2604)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- New package `lib/trie/proof` containing:
  - `lib/trie/proof.go` (refactored)
  - `internal/trie/record` files (then removed)
  - `lib/trie/database.go` (moved since it had nothing to do with the database)
- Proof `Verify` function:
  - ⚠️ returns an error when the root is not found in the proof encoded nodes
  - ⚠️ fix `buildTrie` to detect the root node for roots with an encoding smaller than 32 bytes
  - ⚠️ Removes children with no matching hash from the proof trie
  - ⚠️ Only decode the root node, then lazily decode the other nodes. **This is to allow 'values' (which are not node encodings) to be stored together with node encodings in v1 generated proofs**.
  - Only verify a single key and value instead of pairs (the multi-pair form had no usage at all)
  - Produce ordered (from root to leaf) proof encoded nodes
  - Returns only an error and no boolean (the boolean was unneeded)
- Proof `Generate` function:
  - ⚠️ returns an error when a key is not found in the trie while generating the proof
  - Remove `internal/trie/record` code (now unneeded)
  - Generate encoded nodes ordered from root to leaf (to reduce verify hash computations)
  - Do not add inlined nodes
  - Add full coverage unit tests and remove older tests
- Minor changes
  - Better error wrapping
  - Database arguments use a smaller, locally defined `Database` interface in the `proof` and `trie` packages
  - rename `proofEncodedNodes` to `encodedProofNodes`
  - rename `decProofs` to `encodedProofNodes`
---
 dot/state/storage.go                  |   6 +-
 internal/trie/node/children.go        |  10 +
 internal/trie/node/children_test.go   |  39 ++
 internal/trie/record/node.go          |  10 -
 internal/trie/record/recorder.go      |  27 --
 internal/trie/record/recorder_test.go | 122 -----
 lib/runtime/wasmer/imports.go         |  14 +-
 lib/runtime/wasmer/imports_test.go    |  15 +-
 lib/trie/database.go                  |  79 +---
 lib/trie/lookup.go                    |  50 --
 lib/trie/proof.go                     | 103 -----
 lib/trie/proof/generate.go            | 139 ++++++
 lib/trie/proof/generate_test.go       | 613 +++++++++++++++++++++++++
 lib/trie/proof/helpers_test.go        | 102 +++++
 lib/trie/proof/mocks_generate_test.go |   6 +
 lib/trie/proof/mocks_test.go          |  49 ++
 lib/trie/proof/proof_test.go          |  52 +++
 lib/trie/proof/verify.go              | 177 ++++++++
 lib/trie/proof/verify_test.go         | 628 ++++++++++++++++++++++++++
 lib/trie/proof_test.go                | 227 ----------
 20 files changed, 1839 insertions(+), 629 deletions(-)
 delete mode 100644 internal/trie/record/node.go
 delete mode 100644 internal/trie/record/recorder.go
 delete mode 100644 internal/trie/record/recorder_test.go
 delete mode 100644 lib/trie/lookup.go
 delete mode 100644 lib/trie/proof.go
 create mode 100644 lib/trie/proof/generate.go
 create mode 100644 lib/trie/proof/generate_test.go
 create mode 100644 lib/trie/proof/helpers_test.go
 create mode 100644 lib/trie/proof/mocks_generate_test.go
 create
mode 100644 lib/trie/proof/mocks_test.go create mode 100644 lib/trie/proof/proof_test.go create mode 100644 lib/trie/proof/verify.go create mode 100644 lib/trie/proof/verify_test.go delete mode 100644 lib/trie/proof_test.go diff --git a/dot/state/storage.go b/dot/state/storage.go index b4781b204c..0251d2de99 100644 --- a/dot/state/storage.go +++ b/dot/state/storage.go @@ -14,6 +14,7 @@ import ( "github.com/ChainSafe/gossamer/lib/common" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/trie" + "github.com/ChainSafe/gossamer/lib/trie/proof" ) // storagePrefix storage key prefix. @@ -301,6 +302,7 @@ func (s *StorageState) LoadCodeHash(hash *common.Hash) (common.Hash, error) { } // GenerateTrieProof returns the proofs related to the keys on the state root trie -func (s *StorageState) GenerateTrieProof(stateRoot common.Hash, keys [][]byte) ([][]byte, error) { - return trie.GenerateProof(stateRoot[:], keys, s.db) +func (s *StorageState) GenerateTrieProof(stateRoot common.Hash, keys [][]byte) ( + encodedProofNodes [][]byte, err error) { + return proof.Generate(stateRoot[:], keys, s.db) } diff --git a/internal/trie/node/children.go b/internal/trie/node/children.go index b08c711c9e..725366b42e 100644 --- a/internal/trie/node/children.go +++ b/internal/trie/node/children.go @@ -30,3 +30,13 @@ func (n *Node) NumChildren() (count int) { } return count } + +// HasChild returns true if the node has at least one child. +func (n *Node) HasChild() (has bool) { + for _, child := range n.Children { + if child != nil { + return true + } + } + return false +} diff --git a/internal/trie/node/children_test.go b/internal/trie/node/children_test.go index 66a2603009..17ab48b2f1 100644 --- a/internal/trie/node/children_test.go +++ b/internal/trie/node/children_test.go @@ -118,3 +118,42 @@ func Test_Node_NumChildren(t *testing.T) { }) } } + +func Test_Node_HasChild(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + node Node + has bool + }{ + "no child": {}, + "one child at index 0": { + node: Node{ + Children: []*Node{ + {}, + }, + }, + has: true, + }, + "one child at index 1": { + node: Node{ + Children: []*Node{ + nil, + {}, + }, + }, + has: true, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + has := testCase.node.HasChild() + + assert.Equal(t, testCase.has, has) + }) + } +} diff --git a/internal/trie/record/node.go b/internal/trie/record/node.go deleted file mode 100644 index 19a745c82c..0000000000 --- a/internal/trie/record/node.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package record - -// Node represents a record of a visited node -type Node struct { - RawData []byte - Hash []byte -} diff --git a/internal/trie/record/recorder.go b/internal/trie/record/recorder.go deleted file mode 100644 index 130b434338..0000000000 --- a/internal/trie/record/recorder.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package record - -// Recorder records the list of nodes found by Lookup.Find -type Recorder struct { - nodes []Node -} - -// NewRecorder creates a new recorder. -func NewRecorder() *Recorder { - return &Recorder{} -} - -// Record appends a node to the list of visited nodes. 
-func (r *Recorder) Record(hash, rawData []byte) { - r.nodes = append(r.nodes, Node{RawData: rawData, Hash: hash}) -} - -// GetNodes returns all the nodes recorded. -// Note it does not copy its slice of nodes. -// It's fine to not copy them since the recorder -// is not used again after a call to GetNodes() -func (r *Recorder) GetNodes() (nodes []Node) { - return r.nodes -} diff --git a/internal/trie/record/recorder_test.go b/internal/trie/record/recorder_test.go deleted file mode 100644 index cdf0ed3eaa..0000000000 --- a/internal/trie/record/recorder_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package record - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_NewRecorder(t *testing.T) { - t.Parallel() - - expected := &Recorder{} - - recorder := NewRecorder() - - assert.Equal(t, expected, recorder) -} - -func Test_Recorder_Record(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - recorder *Recorder - hash []byte - rawData []byte - expectedRecorder *Recorder - }{ - "nil data": { - recorder: &Recorder{}, - expectedRecorder: &Recorder{ - nodes: []Node{ - {}, - }, - }, - }, - "insert in empty recorder": { - recorder: &Recorder{}, - hash: []byte{1, 2}, - rawData: []byte{3, 4}, - expectedRecorder: &Recorder{ - nodes: []Node{ - {Hash: []byte{1, 2}, RawData: []byte{3, 4}}, - }, - }, - }, - "insert in non-empty recorder": { - recorder: &Recorder{ - nodes: []Node{ - {Hash: []byte{5, 6}, RawData: []byte{7, 8}}, - }, - }, - hash: []byte{1, 2}, - rawData: []byte{3, 4}, - expectedRecorder: &Recorder{ - nodes: []Node{ - {Hash: []byte{5, 6}, RawData: []byte{7, 8}}, - {Hash: []byte{1, 2}, RawData: []byte{3, 4}}, - }, - }, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - - testCase.recorder.Record(testCase.hash, testCase.rawData) - - assert.Equal(t, testCase.expectedRecorder, testCase.recorder) - }) - } -} - -func Test_Recorder_GetNodes(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - recorder *Recorder - nodes []Node - }{ - "no node": { - recorder: &Recorder{}, - }, - "get single node from recorder": { - recorder: &Recorder{ - nodes: []Node{ - {Hash: []byte{1, 2}, RawData: []byte{3, 4}}, - }, - }, - nodes: []Node{{Hash: []byte{1, 2}, RawData: []byte{3, 4}}}, - }, - "get node from multiple nodes in recorder": { - recorder: &Recorder{ - nodes: []Node{ - {Hash: []byte{1, 2}, RawData: []byte{3, 4}}, - {Hash: []byte{5, 6}, RawData: []byte{7, 8}}, - {Hash: []byte{9, 6}, RawData: []byte{7, 8}}, - }, - }, - nodes: []Node{ - {Hash: []byte{1, 2}, RawData: []byte{3, 4}}, - {Hash: []byte{5, 6}, RawData: []byte{7, 8}}, - {Hash: []byte{9, 6}, RawData: []byte{7, 8}}, - }, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - - nodes := testCase.recorder.GetNodes() - - assert.Equal(t, testCase.nodes, nodes) - }) - } -} diff --git a/lib/runtime/wasmer/imports.go b/lib/runtime/wasmer/imports.go index d62dcfcf26..509c5a3e34 100644 --- a/lib/runtime/wasmer/imports.go +++ b/lib/runtime/wasmer/imports.go @@ -121,6 +121,7 @@ import ( rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/transaction" "github.com/ChainSafe/gossamer/lib/trie" + "github.com/ChainSafe/gossamer/lib/trie/proof" "github.com/ChainSafe/gossamer/pkg/scale" wasm "github.com/wasmerio/go-ext-wasm/wasmer" @@ -886,8 
+887,8 @@ func ext_trie_blake2_256_verify_proof_version_1(context unsafe.Pointer, rootSpan instanceContext := wasm.IntoInstanceContext(context) toDecProofs := asMemorySlice(instanceContext, proofSpan) - var decProofs [][]byte - err := scale.Unmarshal(toDecProofs, &decProofs) + var encodedProofNodes [][]byte + err := scale.Unmarshal(toDecProofs, &encodedProofNodes) if err != nil { logger.Errorf("[ext_trie_blake2_256_verify_proof_version_1]: %s", err) return C.int32_t(0) @@ -899,18 +900,13 @@ func ext_trie_blake2_256_verify_proof_version_1(context unsafe.Pointer, rootSpan mem := instanceContext.Memory().Data() trieRoot := mem[rootSpan : rootSpan+32] - exists, err := trie.VerifyProof(decProofs, trieRoot, []trie.Pair{{Key: key, Value: value}}) + err = proof.Verify(encodedProofNodes, trieRoot, key, value) if err != nil { logger.Errorf("[ext_trie_blake2_256_verify_proof_version_1]: %s", err) return C.int32_t(0) } - var result C.int32_t = 0 - if exists { - result = 1 - } - - return result + return C.int32_t(1) } //export ext_misc_print_hex_version_1 diff --git a/lib/runtime/wasmer/imports_test.go b/lib/runtime/wasmer/imports_test.go index c173cccaf7..2f1581711a 100644 --- a/lib/runtime/wasmer/imports_test.go +++ b/lib/runtime/wasmer/imports_test.go @@ -22,6 +22,7 @@ import ( "github.com/ChainSafe/gossamer/lib/runtime" "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/trie" + "github.com/ChainSafe/gossamer/lib/trie/proof" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -1801,7 +1802,7 @@ func Test_ext_trie_blake2_256_verify_proof_version_1(t *testing.T) { root := hash.ToBytes() otherRoot := otherHash.ToBytes() - proof, err := trie.GenerateProof(root, keys, memdb) + allProofs, err := proof.Generate(root, keys, memdb) require.NoError(t, err) testcases := map[string]struct { @@ -1810,17 +1811,17 @@ func Test_ext_trie_blake2_256_verify_proof_version_1(t *testing.T) { expect bool }{ "Proof should be true": { - root: root, key: []byte("do"), proof: proof, value: []byte("verb"), expect: true}, + root: root, key: []byte("do"), proof: allProofs, value: []byte("verb"), expect: true}, "Root empty, proof should be false": { - root: []byte{}, key: []byte("do"), proof: proof, value: []byte("verb"), expect: false}, + root: []byte{}, key: []byte("do"), proof: allProofs, value: []byte("verb"), expect: false}, "Other root, proof should be false": { - root: otherRoot, key: []byte("do"), proof: proof, value: []byte("verb"), expect: false}, + root: otherRoot, key: []byte("do"), proof: allProofs, value: []byte("verb"), expect: false}, "Value empty, proof should be true": { - root: root, key: []byte("do"), proof: proof, value: nil, expect: true}, + root: root, key: []byte("do"), proof: allProofs, value: nil, expect: true}, "Unknow key, proof should be false": { - root: root, key: []byte("unknow"), proof: proof, value: nil, expect: false}, + root: root, key: []byte("unknow"), proof: allProofs, value: nil, expect: false}, "Key and value unknow, proof should be false": { - root: root, key: []byte("unknow"), proof: proof, value: []byte("unknow"), expect: false}, + root: root, key: []byte("unknow"), proof: allProofs, value: []byte("unknow"), expect: false}, "Empty proof, should be false": { root: root, key: []byte("do"), proof: [][]byte{}, value: nil, expect: false}, } diff --git a/lib/trie/database.go b/lib/trie/database.go index 2a676882b4..e4c2faa112 100644 --- a/lib/trie/database.go +++ b/lib/trie/database.go @@ 
-5,7 +5,6 @@ package trie import ( "bytes" - "errors" "fmt" "github.com/ChainSafe/gossamer/internal/trie/codec" @@ -15,10 +14,11 @@ import ( "github.com/ChainSafe/chaindb" ) -var ( - ErrEmptyProof = errors.New("proof slice empty") - ErrDecodeNode = errors.New("cannot decode node") -) +// Database is an interface to get values from a +// key value database. +type Database interface { + Get(key []byte) (value []byte, err error) +} // Store stores each trie node in the database, // where the key is the hash of the encoded node @@ -76,74 +76,9 @@ func (t *Trie) store(db chaindb.Batch, n *Node) error { return nil } -// LoadFromProof sets a partial trie based on the proof slice of encoded nodes. -// Note this is exported because it is imported is used by: -// https://github.com/ComposableFi/ibc-go/blob/6d62edaa1a3cb0768c430dab81bb195e0b0c72db/modules/light-clients/11-beefy/types/client_state.go#L78 -func (t *Trie) LoadFromProof(proofEncodedNodes [][]byte, rootHash []byte) error { - if len(proofEncodedNodes) == 0 { - return ErrEmptyProof - } - - proofHashToNode := make(map[string]*Node, len(proofEncodedNodes)) - - for i, rawNode := range proofEncodedNodes { - decodedNode, err := node.Decode(bytes.NewReader(rawNode)) - if err != nil { - return fmt.Errorf("%w: at index %d: 0x%x", - ErrDecodeNode, i, rawNode) - } - - const dirty = false - decodedNode.SetDirty(dirty) - decodedNode.Encoding = rawNode - decodedNode.HashDigest = nil - - _, hash, err := decodedNode.EncodeAndHash(false) - if err != nil { - return fmt.Errorf("cannot encode and hash node at index %d: %w", i, err) - } - - proofHash := common.BytesToHex(hash) - proofHashToNode[proofHash] = decodedNode - - if bytes.Equal(hash, rootHash) { - // Found root in proof - t.root = decodedNode - } - } - - t.loadProof(proofHashToNode, t.root) - - return nil -} - -// loadProof is a recursive function that will create all the trie paths based -// on the mapped proofs slice starting at the root -func (t *Trie) loadProof(proofHashToNode map[string]*Node, n *Node) { - if n.Type() != node.Branch { - return - } - - branch := n - for i, child := range branch.Children { - if child == nil { - continue - } - - proofHash := common.BytesToHex(child.HashDigest) - node, ok := proofHashToNode[proofHash] - if !ok { - continue - } - - branch.Children[i] = node - t.loadProof(proofHashToNode, node) - } -} - // Load reconstructs the trie from the database from the given root hash. // It is used when restarting the node to load the current state trie. 
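
Since `Load` now only depends on `Get`, any key-value store exposing that single method satisfies the new `Database` interface, and the hunk below updates `Load`'s signature accordingly. A minimal sketch of a conforming store, using a hypothetical map-backed type that is not part of this patch:

package trieexample

import "errors"

// Database mirrors the minimal interface defined above
// in lib/trie/database.go.
type Database interface {
	Get(key []byte) (value []byte, err error)
}

var errKeyNotFound = errors.New("key not found")

// mapDB is a hypothetical in-memory store satisfying Database.
type mapDB map[string][]byte

func (m mapDB) Get(key []byte) (value []byte, err error) {
	value, ok := m[string(key)]
	if !ok {
		return nil, errKeyNotFound
	}
	return value, nil
}

// Compile-time check that mapDB satisfies Database.
var _ Database = mapDB(nil)
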
-func (t *Trie) Load(db chaindb.Database, rootHash common.Hash) error { +func (t *Trie) Load(db Database, rootHash common.Hash) error { if rootHash == EmptyHash { t.root = nil return nil @@ -169,7 +104,7 @@ func (t *Trie) Load(db chaindb.Database, rootHash common.Hash) error { return t.load(db, t.root) } -func (t *Trie) load(db chaindb.Database, n *Node) error { +func (t *Trie) load(db Database, n *Node) error { if n.Type() != node.Branch { return nil } diff --git a/lib/trie/lookup.go b/lib/trie/lookup.go deleted file mode 100644 index 4c0a169936..0000000000 --- a/lib/trie/lookup.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package trie - -import ( - "bytes" - - "github.com/ChainSafe/gossamer/internal/trie/node" - "github.com/ChainSafe/gossamer/internal/trie/record" -) - -var _ recorder = (*record.Recorder)(nil) - -type recorder interface { - Record(hash, rawData []byte) -} - -// findAndRecord search for a desired key recording all the nodes in the path including the desired node -func findAndRecord(t *Trie, key []byte, recorder recorder) error { - return find(t.root, key, recorder, true) -} - -func find(parent *Node, key []byte, recorder recorder, isCurrentRoot bool) error { - enc, hash, err := parent.EncodeAndHash(isCurrentRoot) - if err != nil { - return err - } - - recorder.Record(hash, enc) - - if parent.Type() != node.Branch { - return nil - } - - branch := parent - length := lenCommonPrefix(branch.Key, key) - - // found the value at this node - if bytes.Equal(branch.Key, key) || len(key) == 0 { - return nil - } - - // did not find value - if bytes.Equal(branch.Key[:length], key) && len(key) < len(branch.Key) { - return nil - } - - return find(branch.Children[key[length]], key[length+1:], recorder, false) -} diff --git a/lib/trie/proof.go b/lib/trie/proof.go deleted file mode 100644 index 2d8444d2db..0000000000 --- a/lib/trie/proof.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package trie - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - - "github.com/ChainSafe/chaindb" - "github.com/ChainSafe/gossamer/internal/trie/codec" - "github.com/ChainSafe/gossamer/internal/trie/record" - "github.com/ChainSafe/gossamer/lib/common" -) - -var ( - // ErrEmptyTrieRoot ... - ErrEmptyTrieRoot = errors.New("provided trie must have a root") - - // ErrValueNotFound ... - ErrValueNotFound = errors.New("expected value not found in the trie") - - // ErrKeyNotFound ... - ErrKeyNotFound = errors.New("expected key not found in the trie") - - // ErrDuplicateKeys ... - ErrDuplicateKeys = errors.New("duplicate keys on verify proof") - - // ErrLoadFromProof ... 
- ErrLoadFromProof = errors.New("failed to build the proof trie") -) - -// GenerateProof receive the keys to proof, the trie root and a reference to database -func GenerateProof(root []byte, keys [][]byte, db chaindb.Database) ([][]byte, error) { - trackedProofs := make(map[string][]byte) - - proofTrie := NewEmptyTrie() - if err := proofTrie.Load(db, common.BytesToHash(root)); err != nil { - return nil, err - } - - for _, k := range keys { - nk := codec.KeyLEToNibbles(k) - - recorder := record.NewRecorder() - err := findAndRecord(proofTrie, nk, recorder) - if err != nil { - return nil, err - } - - for _, recNode := range recorder.GetNodes() { - nodeHashHex := common.BytesToHex(recNode.Hash) - if _, ok := trackedProofs[nodeHashHex]; !ok { - trackedProofs[nodeHashHex] = recNode.RawData - } - } - } - - proofs := make([][]byte, 0) - for _, p := range trackedProofs { - proofs = append(proofs, p) - } - - return proofs, nil -} - -// Pair holds the key and value to check while verifying the proof -type Pair struct{ Key, Value []byte } - -// VerifyProof ensure a given key is inside a proof by creating a proof trie based on the proof slice -// this function ignores the order of proofs -func VerifyProof(proof [][]byte, root []byte, items []Pair) (bool, error) { - set := make(map[string]struct{}, len(items)) - - // check for duplicate keys - for _, item := range items { - hexKey := hex.EncodeToString(item.Key) - if _, ok := set[hexKey]; ok { - return false, ErrDuplicateKeys - } - set[hexKey] = struct{}{} - } - - proofTrie := NewEmptyTrie() - if err := proofTrie.LoadFromProof(proof, root); err != nil { - return false, fmt.Errorf("%w: %s", ErrLoadFromProof, err) - } - - for _, item := range items { - recValue := proofTrie.Get(item.Key) - if recValue == nil { - return false, ErrKeyNotFound - } - // here we need to compare value only if the caller pass the value - if len(item.Value) > 0 && !bytes.Equal(item.Value, recValue) { - return false, ErrValueNotFound - } - } - - return true, nil -} diff --git a/lib/trie/proof/generate.go b/lib/trie/proof/generate.go new file mode 100644 index 0000000000..9ab7c1dc84 --- /dev/null +++ b/lib/trie/proof/generate.go @@ -0,0 +1,139 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package proof + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ChainSafe/gossamer/internal/trie/codec" + "github.com/ChainSafe/gossamer/internal/trie/node" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/trie" +) + +var ( + ErrKeyNotFound = errors.New("key not found") +) + +// Database defines a key value Get method used +// for proof generation. +type Database interface { + Get(key []byte) (value []byte, err error) +} + +// Generate generates and deduplicates the encoded proof nodes +// for the trie corresponding to the root hash given, and for +// the slice of (Little Endian) full keys given. The database given +// is used to load the trie using the root hash given. 
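
Before the implementation below, an end-to-end usage sketch of the new API, assuming a trie persisted with `trie.Store`; the flow mirrors `Test_Generate_Verify` added later in this patch:

package main

import (
	"fmt"

	"github.com/ChainSafe/chaindb"
	"github.com/ChainSafe/gossamer/lib/trie"
	"github.com/ChainSafe/gossamer/lib/trie/proof"
)

func main() {
	tr := trie.NewEmptyTrie()
	tr.Put([]byte("cat"), []byte("meow"))

	rootHash, err := tr.Hash()
	if err != nil {
		panic(err)
	}

	// Persist the trie so Generate can load it by root hash.
	database, err := chaindb.NewBadgerDB(&chaindb.Config{InMemory: true})
	if err != nil {
		panic(err)
	}
	if err := tr.Store(database); err != nil {
		panic(err)
	}

	encodedProofNodes, err := proof.Generate(rootHash.ToBytes(),
		[][]byte{[]byte("cat")}, database)
	if err != nil {
		panic(err)
	}

	// Verify now returns only an error: nil means the
	// key/value pair is proven.
	err = proof.Verify(encodedProofNodes, rootHash.ToBytes(),
		[]byte("cat"), []byte("meow"))
	fmt.Println(err) // <nil>
}
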
+func Generate(rootHash []byte, fullKeys [][]byte, database Database) ( + encodedProofNodes [][]byte, err error) { + trie := trie.NewEmptyTrie() + if err := trie.Load(database, common.BytesToHash(rootHash)); err != nil { + return nil, fmt.Errorf("loading trie: %w", err) + } + rootNode := trie.RootNode() + + hashesSeen := make(map[string]struct{}) + for _, fullKey := range fullKeys { + fullKeyNibbles := codec.KeyLEToNibbles(fullKey) + const isRoot = true + newEncodedProofNodes, err := walk(rootNode, fullKeyNibbles, isRoot) + if err != nil { + // Note we wrap the full key context here since walk is recursive and + // may not be aware of the initial full key. + return nil, fmt.Errorf("walking to node at key 0x%x: %w", fullKey, err) + } + + for _, encodedProofNode := range newEncodedProofNodes { + digest, err := common.Blake2bHash(encodedProofNode) + if err != nil { + return nil, fmt.Errorf("blake2b hash: %w", err) + } + hashString := string(digest.ToBytes()) + + _, seen := hashesSeen[hashString] + if seen { + continue + } + hashesSeen[hashString] = struct{}{} + + encodedProofNodes = append(encodedProofNodes, encodedProofNode) + } + } + + return encodedProofNodes, nil +} + +func walk(parent *node.Node, fullKey []byte, isRoot bool) ( + encodedProofNodes [][]byte, err error) { + if parent == nil { + if len(fullKey) == 0 { + return nil, nil + } + return nil, ErrKeyNotFound + } + + // Note we do not use sync.Pool buffers since we would have + // to copy it so it persists in encodedProofNodes. + encodingBuffer := bytes.NewBuffer(nil) + err = parent.Encode(encodingBuffer) + if err != nil { + return nil, fmt.Errorf("encode node: %w", err) + } + + if isRoot || encodingBuffer.Len() >= 32 { + // Only add the root node encoding (whatever its length) + // and child node encodings greater or equal to 32 bytes. + // This is because child node encodings of less than 32 bytes + // are inlined in the parent node encoding, so there is no need + // to duplicate them in the proof generated. + encodedProofNodes = append(encodedProofNodes, encodingBuffer.Bytes()) + } + + nodeFound := len(fullKey) == 0 || bytes.Equal(parent.Key, fullKey) + if nodeFound { + return encodedProofNodes, nil + } + + if parent.Type() == node.Leaf && !nodeFound { + return nil, ErrKeyNotFound + } + + nodeIsDeeper := len(fullKey) > len(parent.Key) + if !nodeIsDeeper { + return nil, ErrKeyNotFound + } + + commonLength := lenCommonPrefix(parent.Key, fullKey) + childIndex := fullKey[commonLength] + nextChild := parent.Children[childIndex] + nextFullKey := fullKey[commonLength+1:] + isRoot = false + deeperEncodedProofNodes, err := walk(nextChild, nextFullKey, isRoot) + if err != nil { + return nil, err // note: do not wrap since this is recursive + } + + encodedProofNodes = append(encodedProofNodes, deeperEncodedProofNodes...) + return encodedProofNodes, nil +} + +// lenCommonPrefix returns the length of the +// common prefix between two byte slices. 
+func lenCommonPrefix(a, b []byte) (length int) { + min := len(a) + if len(b) < min { + min = len(b) + } + + for length = 0; length < min; length++ { + if a[length] != b[length] { + break + } + } + + return length +} diff --git a/lib/trie/proof/generate_test.go b/lib/trie/proof/generate_test.go new file mode 100644 index 0000000000..9eaffde1bf --- /dev/null +++ b/lib/trie/proof/generate_test.go @@ -0,0 +1,613 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package proof + +import ( + "errors" + "testing" + + "github.com/ChainSafe/gossamer/internal/trie/codec" + "github.com/ChainSafe/gossamer/internal/trie/node" + "github.com/ChainSafe/gossamer/lib/trie" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Generate(t *testing.T) { + t.Parallel() + + errTest := errors.New("test error") + + someHash := make([]byte, 32) + for i := range someHash { + someHash[i] = byte(i) + } + + largeValue := generateBytes(t, 40) + assertLongEncoding(t, node.Node{Value: largeValue}) + + testCases := map[string]struct { + rootHash []byte + fullKeysNibbles [][]byte + databaseBuilder func(ctrl *gomock.Controller) Database + encodedProofNodes [][]byte + errWrapped error + errMessage string + }{ + "failed loading trie": { + rootHash: someHash, + databaseBuilder: func(ctrl *gomock.Controller) Database { + mockDatabase := NewMockDatabase(ctrl) + mockDatabase.EXPECT().Get(someHash). + Return(nil, errTest) + return mockDatabase + }, + errWrapped: errTest, + errMessage: "loading trie: " + + "failed to find root key " + + "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f: " + + "test error", + }, + "walk error": { + rootHash: someHash, + fullKeysNibbles: [][]byte{{1}}, + databaseBuilder: func(ctrl *gomock.Controller) Database { + mockDatabase := NewMockDatabase(ctrl) + encodedRoot := encodeNode(t, node.Node{ + Key: []byte{1}, + Value: []byte{2}, + }) + mockDatabase.EXPECT().Get(someHash). + Return(encodedRoot, nil) + return mockDatabase + }, + errWrapped: ErrKeyNotFound, + errMessage: "walking to node at key 0x01: key not found", + }, + "leaf root": { + rootHash: someHash, + fullKeysNibbles: [][]byte{{}}, + databaseBuilder: func(ctrl *gomock.Controller) Database { + mockDatabase := NewMockDatabase(ctrl) + encodedRoot := encodeNode(t, node.Node{ + Key: []byte{1}, + Value: []byte{2}, + }) + mockDatabase.EXPECT().Get(someHash). + Return(encodedRoot, nil) + return mockDatabase + }, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1}, + Value: []byte{2}, + }), + }, + }, + "branch root": { + rootHash: someHash, + fullKeysNibbles: [][]byte{{}}, + databaseBuilder: func(ctrl *gomock.Controller) Database { + mockDatabase := NewMockDatabase(ctrl) + encodedRoot := encodeNode(t, node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Children: padRightChildren([]*node.Node{ + nil, nil, + { + Key: []byte{3}, + Value: []byte{4}, + }, + }), + }) + mockDatabase.EXPECT().Get(someHash). 
+ Return(encodedRoot, nil) + return mockDatabase + }, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Children: padRightChildren([]*node.Node{ + nil, nil, + { + Key: []byte{3}, + Value: []byte{4}, + }, + }), + }), + }, + }, + "target leaf of branch": { + rootHash: someHash, + fullKeysNibbles: [][]byte{ + {1, 2, 3, 4}, + }, + databaseBuilder: func(ctrl *gomock.Controller) Database { + mockDatabase := NewMockDatabase(ctrl) + + rootNode := node.Node{ + Key: []byte{1, 2}, + Value: []byte{2}, + Children: padRightChildren([]*node.Node{ + nil, nil, nil, + { // full key 1, 2, 3, 4 + Key: []byte{4}, + Value: largeValue, + }, + }), + } + + mockDatabase.EXPECT().Get(someHash). + Return(encodeNode(t, rootNode), nil) + + encodedChild := encodeNode(t, *rootNode.Children[3]) + mockDatabase.EXPECT().Get(blake2b(t, encodedChild)). + Return(encodedChild, nil) + + return mockDatabase + }, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{2}, + Children: padRightChildren([]*node.Node{ + nil, nil, nil, + { + Key: []byte{4}, + Value: largeValue, + }, + }), + }), + encodeNode(t, node.Node{ + Key: []byte{4}, + Value: largeValue, + }), + }, + }, + "deduplicate proof nodes": { + rootHash: someHash, + fullKeysNibbles: [][]byte{ + {1, 2, 3, 4}, + {1, 2, 4, 4}, + {1, 2, 5, 5}, + }, + databaseBuilder: func(ctrl *gomock.Controller) Database { + mockDatabase := NewMockDatabase(ctrl) + + rootNode := node.Node{ + Key: []byte{1, 2}, + Value: []byte{2}, + Children: padRightChildren([]*node.Node{ + nil, nil, nil, + { // full key 1, 2, 3, 4 + Key: []byte{4}, + Value: largeValue, + }, + { // full key 1, 2, 4, 4 + Key: []byte{4}, + Value: largeValue, + }, + { // full key 1, 2, 5, 5 + Key: []byte{5}, + Value: largeValue, + }, + }), + } + + mockDatabase.EXPECT().Get(someHash). + Return(encodeNode(t, rootNode), nil) + + encodedLargeChild1 := encodeNode(t, *rootNode.Children[3]) + mockDatabase.EXPECT().Get(blake2b(t, encodedLargeChild1)). + Return(encodedLargeChild1, nil).Times(2) + + encodedLargeChild2 := encodeNode(t, *rootNode.Children[5]) + mockDatabase.EXPECT().Get(blake2b(t, encodedLargeChild2)). 
+ Return(encodedLargeChild2, nil) + + return mockDatabase + }, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{2}, + Children: padRightChildren([]*node.Node{ + nil, nil, nil, + { // full key 1, 2, 3, 4 + Key: []byte{4}, + Value: largeValue, + }, + { // full key 1, 2, 4, 4 + Key: []byte{4}, + Value: largeValue, + }, + { // full key 1, 2, 5, 5 + Key: []byte{5}, + Value: largeValue, + }, + }), + }), + encodeNode(t, node.Node{ + Key: []byte{4}, + Value: largeValue, + }), + encodeNode(t, node.Node{ + Key: []byte{5}, + Value: largeValue, + }), + }, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + database := testCase.databaseBuilder(ctrl) + fullKeysLE := make([][]byte, len(testCase.fullKeysNibbles)) + for i, fullKeyNibbles := range testCase.fullKeysNibbles { + fullKeysLE[i] = codec.NibblesToKeyLE(fullKeyNibbles) + } + + encodedProofNodes, err := Generate(testCase.rootHash, + fullKeysLE, database) + + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + assert.Equal(t, testCase.encodedProofNodes, encodedProofNodes) + }) + } +} + +func Test_walk(t *testing.T) { + t.Parallel() + + largeValue := generateBytes(t, 40) + assertLongEncoding(t, node.Node{Value: largeValue}) + + testCases := map[string]struct { + parent *node.Node + fullKey []byte // nibbles + isRoot bool + encodedProofNodes [][]byte + errWrapped error + errMessage string + }{ + "nil parent and empty full key": {}, + "nil parent and non empty full key": { + fullKey: []byte{1}, + errWrapped: ErrKeyNotFound, + errMessage: "key not found", + }, + // The parent encode error cannot be triggered here + // since it can only be caused by a buffer.Write error. 
+ "parent leaf and empty full key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{1}, + }, + isRoot: true, + encodedProofNodes: [][]byte{encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{1}, + })}, + }, + "parent leaf and shorter full key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{1}, + }, + fullKey: []byte{1}, + errWrapped: ErrKeyNotFound, + errMessage: "key not found", + }, + "parent leaf and mismatching full key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{1}, + }, + fullKey: []byte{1, 3}, + errWrapped: ErrKeyNotFound, + errMessage: "key not found", + }, + "parent leaf and longer full key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{1}, + }, + fullKey: []byte{1, 2, 3}, + errWrapped: ErrKeyNotFound, + errMessage: "key not found", + }, + "branch and empty search key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }, + isRoot: true, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }), + }, + }, + "branch and shorter full key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }, + fullKey: []byte{1}, + errWrapped: ErrKeyNotFound, + errMessage: "key not found", + }, + "branch and mismatching full key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }, + fullKey: []byte{1, 3}, + errWrapped: ErrKeyNotFound, + errMessage: "key not found", + }, + "branch and matching search key": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }, + fullKey: []byte{1, 2}, + isRoot: true, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }), + }, + }, + "branch and matching search key for small leaf encoding": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { // full key 1, 2, 0, 1, 2 + Key: []byte{1, 2}, + Value: []byte{3}, + }, + }), + }, + fullKey: []byte{1, 2, 0, 1, 2}, + isRoot: true, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { // full key 1, 2, 0, 1, 2 + Key: []byte{1, 2}, + Value: []byte{3}, + }, + }), + }), + // Note the leaf encoding is not added since its encoding + // is less than 32 bytes. 
+ }, + }, + "branch and matching search key for large leaf encoding": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { // full key 1, 2, 0, 1, 2 + Key: []byte{1, 2}, + Value: largeValue, + }, + }), + }, + fullKey: []byte{1, 2, 0, 1, 2}, + isRoot: true, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { // full key 1, 2, 0, 1, 2 + Key: []byte{1, 2}, + Value: largeValue, + }, + }), + }), + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: largeValue, + }), + }, + }, + "key not found at deeper level": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4, 5}, + Value: []byte{5}, + }, + }), + }, + fullKey: []byte{1, 2, 0x04, 4}, + errWrapped: ErrKeyNotFound, + errMessage: "key not found", + }, + "found leaf at deeper level": { + parent: &node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }, + fullKey: []byte{1, 2, 0x04}, + isRoot: true, + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1, 2}, + Value: []byte{3}, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{5}, + }, + }), + }), + }, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + encodedProofNodes, err := walk(testCase.parent, testCase.fullKey, testCase.isRoot) + + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + assert.Equal(t, testCase.encodedProofNodes, encodedProofNodes) + }) + } +} + +func Test_lenCommonPrefix(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + a []byte + b []byte + length int + }{ + "nil slices": {}, + "empty slices": { + a: []byte{}, + b: []byte{}, + }, + "fully different": { + a: []byte{1, 2, 3}, + b: []byte{4, 5, 6}, + }, + "fully same": { + a: []byte{1, 2, 3}, + b: []byte{1, 2, 3}, + length: 3, + }, + "different and common prefix": { + a: []byte{1, 2, 3, 4}, + b: []byte{1, 2, 4, 4}, + length: 2, + }, + "first bigger than second": { + a: []byte{1, 2, 3}, + b: []byte{1, 2}, + length: 2, + }, + "first smaller than second": { + a: []byte{1, 2}, + b: []byte{1, 2, 3}, + length: 2, + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + length := lenCommonPrefix(testCase.a, testCase.b) + + assert.Equal(t, testCase.length, length) + }) + } +} + +// Note on the performance of walk: +// It was tried to optimise appending to the encodedProofNodes +// slice by: +// 1. appending to the same slice *[][]byte passed as argument +// 2. appending the upper node to the deeper nodes slice +// In both cases, the performance difference is very small +// so the code is kept to this inefficient-looking append, +// which is in the end quite performant still. +func Benchmark_walk(b *testing.B) { + trie := trie.NewEmptyTrie() + + // Build a deep trie. 
+ const trieDepth = 1000 + for i := 0; i < trieDepth; i++ { + keySize := 1 + i + key := make([]byte, keySize) + const trieValueSize = 10 + value := make([]byte, trieValueSize) + + trie.Put(key, value) + } + + longestKeyLE := make([]byte, trieDepth) + longestKeyNibbles := codec.KeyLEToNibbles(longestKeyLE) + + rootNode := trie.RootNode() + const isRoot = true + encodedProofNodes, err := walk(rootNode, longestKeyNibbles, isRoot) + require.NoError(b, err) + require.Equal(b, len(encodedProofNodes), trieDepth) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = walk(rootNode, longestKeyNibbles, isRoot) + } +} diff --git a/lib/trie/proof/helpers_test.go b/lib/trie/proof/helpers_test.go new file mode 100644 index 0000000000..f7279f75e2 --- /dev/null +++ b/lib/trie/proof/helpers_test.go @@ -0,0 +1,102 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package proof + +import ( + "bytes" + "math/rand" + "testing" + + "github.com/ChainSafe/gossamer/internal/trie/node" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/stretchr/testify/require" +) + +func padRightChildren(slice []*node.Node) (paddedSlice []*node.Node) { + paddedSlice = make([]*node.Node, node.ChildrenCapacity) + copy(paddedSlice, slice) + return paddedSlice +} + +func encodeNode(t *testing.T, node node.Node) (encoded []byte) { + t.Helper() + buffer := bytes.NewBuffer(nil) + err := node.Encode(buffer) + require.NoError(t, err) + return buffer.Bytes() +} + +func blake2bNode(t *testing.T, node node.Node) (digest []byte) { + t.Helper() + encoding := encodeNode(t, node) + return blake2b(t, encoding) +} + +func scaleEncode(t *testing.T, data []byte) (encoded []byte) { + t.Helper() + encoded, err := scale.Marshal(data) + require.NoError(t, err) + return encoded +} + +func blake2b(t *testing.T, data []byte) (digest []byte) { + t.Helper() + digestHash, err := common.Blake2bHash(data) + require.NoError(t, err) + digest = digestHash[:] + return digest +} + +func concatBytes(slices [][]byte) (concatenated []byte) { + for _, slice := range slices { + concatenated = append(concatenated, slice...) + } + return concatenated +} + +// generateBytes generates a pseudo random byte slice +// of the given length. It uses `0` as its seed so +// calling it multiple times will generate the same +// byte slice. This is designed as such in order to have +// deterministic unit tests. +func generateBytes(t *testing.T, length uint) (bytes []byte) { + t.Helper() + generator := rand.New(rand.NewSource(0)) + bytes = make([]byte, length) + _, err := generator.Read(bytes) + require.NoError(t, err) + return bytes +} + +// getBadNodeEncoding returns a particular bad node encoding of 33 bytes. 
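
The encoding is "bad" because its header byte 0b00000001 does not match any known node variant, so `node.Decode` rejects it immediately, as `Test_getBadNodeEncoding` below asserts. A sketch of that reasoning, written as if it sat next to the helper in the proof test package (the example function name is illustrative, not part of the patch):

func Example_badHeaderByte() {
	badEncoding := getBadNodeEncoding()
	// The known node variants use leading header bits 01 (leaf),
	// 10 (branch) and 11 (branch with value), so 0b00000001 matches
	// none of them and node.Decode fails with a
	// "node variant is unknown" error.
	fmt.Printf("%08b\n", badEncoding[0])
	// Output: 00000001
}
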
+func getBadNodeEncoding() (badEncoding []byte) { + return []byte{ + 0x1, 0x94, 0xfd, 0xc2, 0xfa, 0x2f, 0xfc, 0xc0, 0x41, 0xd3, + 0xff, 0x12, 0x4, 0x5b, 0x73, 0xc8, 0x6e, 0x4f, 0xf9, 0x5f, + 0xf6, 0x62, 0xa5, 0xee, 0xe8, 0x2a, 0xbd, 0xf4, 0x4a, 0x2d, + 0xb, 0x75, 0xfb} +} + +func Test_getBadNodeEncoding(t *testing.T) { + t.Parallel() + + badEncoding := getBadNodeEncoding() + _, err := node.Decode(bytes.NewBuffer(badEncoding)) + require.Error(t, err) +} + +func assertLongEncoding(t *testing.T, node node.Node) { + t.Helper() + + encoding := encodeNode(t, node) + require.Greater(t, len(encoding), 32) +} + +func assertShortEncoding(t *testing.T, node node.Node) { + t.Helper() + + encoding := encodeNode(t, node) + require.LessOrEqual(t, len(encoding), 32) +} diff --git a/lib/trie/proof/mocks_generate_test.go b/lib/trie/proof/mocks_generate_test.go new file mode 100644 index 0000000000..81a9e78f9b --- /dev/null +++ b/lib/trie/proof/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package proof + +//go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . Database diff --git a/lib/trie/proof/mocks_test.go b/lib/trie/proof/mocks_test.go new file mode 100644 index 0000000000..69262dc315 --- /dev/null +++ b/lib/trie/proof/mocks_test.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/lib/trie/proof (interfaces: Database) + +// Package proof is a generated GoMock package. +package proof + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockDatabase is a mock of Database interface. +type MockDatabase struct { + ctrl *gomock.Controller + recorder *MockDatabaseMockRecorder +} + +// MockDatabaseMockRecorder is the mock recorder for MockDatabase. +type MockDatabaseMockRecorder struct { + mock *MockDatabase +} + +// NewMockDatabase creates a new mock instance. +func NewMockDatabase(ctrl *gomock.Controller) *MockDatabase { + mock := &MockDatabase{ctrl: ctrl} + mock.recorder = &MockDatabaseMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDatabase) EXPECT() *MockDatabaseMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockDatabase) Get(arg0 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockDatabaseMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDatabase)(nil).Get), arg0) +} diff --git a/lib/trie/proof/proof_test.go b/lib/trie/proof/proof_test.go new file mode 100644 index 0000000000..7a403553fb --- /dev/null +++ b/lib/trie/proof/proof_test.go @@ -0,0 +1,52 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package proof + +import ( + "fmt" + "testing" + + "github.com/ChainSafe/chaindb" + "github.com/ChainSafe/gossamer/lib/trie" + "github.com/stretchr/testify/require" +) + +func Test_Generate_Verify(t *testing.T) { + t.Parallel() + + keys := []string{ + "cat", + "catapulta", + "catapora", + "dog", + "doguinho", + } + + trie := trie.NewEmptyTrie() + + for i, key := range keys { + value := fmt.Sprintf("%x-%d", key, i) + trie.Put([]byte(key), []byte(value)) + } + + rootHash, err := trie.Hash() + require.NoError(t, err) + + database, err := chaindb.NewBadgerDB(&chaindb.Config{ + InMemory: true, + }) + require.NoError(t, err) + err = trie.Store(database) + require.NoError(t, err) + + for i, key := range keys { + fullKeys := [][]byte{[]byte(key)} + proof, err := Generate(rootHash.ToBytes(), fullKeys, database) + require.NoError(t, err) + + expectedValue := fmt.Sprintf("%x-%d", key, i) + err = Verify(proof, rootHash.ToBytes(), []byte(key), []byte(expectedValue)) + require.NoError(t, err) + } +} diff --git a/lib/trie/proof/verify.go b/lib/trie/proof/verify.go new file mode 100644 index 0000000000..b98b15476d --- /dev/null +++ b/lib/trie/proof/verify.go @@ -0,0 +1,177 @@ +// Copyright 2021 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package proof + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/ChainSafe/gossamer/internal/trie/node" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/trie" +) + +var ( + ErrKeyNotFoundInProofTrie = errors.New("key not found in proof trie") + ErrValueMismatchProofTrie = errors.New("value found in proof trie does not match") +) + +// Verify verifies a given key and value belongs to the trie by creating +// a proof trie based on the encoded proof nodes given. The order of proofs is ignored. +// A nil error is returned on success. +// Note this is exported because it is imported and used by: +// https://github.com/ComposableFi/ibc-go/blob/6d62edaa1a3cb0768c430dab81bb195e0b0c72db/modules/light-clients/11-beefy/types/client_state.go#L78 +func Verify(encodedProofNodes [][]byte, rootHash, key, value []byte) (err error) { + proofTrie, err := buildTrie(encodedProofNodes, rootHash) + if err != nil { + return fmt.Errorf("building trie from proof encoded nodes: %w", err) + } + + proofTrieValue := proofTrie.Get(key) + if proofTrieValue == nil { + return fmt.Errorf("%w: %s in proof trie for root hash 0x%x", + ErrKeyNotFoundInProofTrie, bytesToString(key), rootHash) + } + + // compare the value only if the caller pass a non empty value + if len(value) > 0 && !bytes.Equal(value, proofTrieValue) { + return fmt.Errorf("%w: expected value %s but got value %s from proof trie", + ErrValueMismatchProofTrie, bytesToString(value), bytesToString(proofTrieValue)) + } + + return nil +} + +var ( + ErrEmptyProof = errors.New("proof slice empty") + ErrRootNodeNotFound = errors.New("root node not found in proof") +) + +// buildTrie sets a partial trie based on the proof slice of encoded nodes. 
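
`buildTrie`, defined next, keys its lookup map by each node's Merkle value: the encoding itself when it is at most 32 bytes (an inlined node), otherwise its blake2b digest. A standalone sketch of that rule, where the helper name `merkleValueOf` is illustrative and not part of the patch:

package main

import (
	"fmt"

	"github.com/ChainSafe/gossamer/lib/common"
)

// merkleValueOf mirrors the rule used in buildTrie:
// encodings of at most 32 bytes are their own Merkle value,
// longer encodings are referenced by their blake2b digest.
func merkleValueOf(encoding []byte) ([]byte, error) {
	if len(encoding) <= 32 {
		return encoding, nil
	}
	digest, err := common.Blake2bHash(encoding)
	if err != nil {
		return nil, fmt.Errorf("blake2b hash: %w", err)
	}
	return digest[:], nil
}

func main() {
	// 0x41010402 is the encoding of the small leaf used in the
	// "root not found" test case below.
	shortEncoding := []byte{0x41, 0x01, 0x04, 0x02}
	merkleValue, _ := merkleValueOf(shortEncoding)
	fmt.Printf("0x%x\n", merkleValue) // 0x41010402: the encoding itself
}
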
+func buildTrie(encodedProofNodes [][]byte, rootHash []byte) (t *trie.Trie, err error) { + if len(encodedProofNodes) == 0 { + return nil, fmt.Errorf("%w: for Merkle root hash 0x%x", + ErrEmptyProof, rootHash) + } + + merkleValueToEncoding := make(map[string][]byte, len(encodedProofNodes)) + + // This loop finds the root node and decodes it. + // The other nodes have their Merkle value (blake2b digest or the encoding itself) + // inserted into a map from merkle value to encoding. They are only decoded + // later if the root or one of its descendant node reference their Merkle value. + var root *node.Node + for _, encodedProofNode := range encodedProofNodes { + var digest []byte + if root == nil { + // root node not found yet + digestHash, err := common.Blake2bHash(encodedProofNode) + if err != nil { + return nil, fmt.Errorf("blake2b hash: %w", err) + } + digest = digestHash[:] + + if bytes.Equal(digest, rootHash) { + root, err = node.Decode(bytes.NewReader(encodedProofNode)) + if err != nil { + return nil, fmt.Errorf("decoding root node: %w", err) + } + continue // no need to add root to map of hash to encoding + } + } + + var merkleValue []byte + if len(encodedProofNode) <= 32 { + merkleValue = encodedProofNode + } else { + if digest == nil { + digestHash, err := common.Blake2bHash(encodedProofNode) + if err != nil { + return nil, fmt.Errorf("blake2b hash: %w", err) + } + digest = digestHash[:] + } + merkleValue = digest + } + + merkleValueToEncoding[string(merkleValue)] = encodedProofNode + } + + if root == nil { + proofMerkleValues := make([]string, 0, len(merkleValueToEncoding)) + for merkleValueString := range merkleValueToEncoding { + merkleValueHex := common.BytesToHex([]byte(merkleValueString)) + proofMerkleValues = append(proofMerkleValues, merkleValueHex) + } + return nil, fmt.Errorf("%w: for Merkle root hash 0x%x in proof Merkle value(s) %s", + ErrRootNodeNotFound, rootHash, strings.Join(proofMerkleValues, ", ")) + } + + err = loadProof(merkleValueToEncoding, root) + if err != nil { + return nil, fmt.Errorf("loading proof: %w", err) + } + + return trie.NewTrie(root), nil +} + +// loadProof is a recursive function that will create all the trie paths based +// on the map from node hash to node starting at the root. +func loadProof(merkleValueToEncoding map[string][]byte, n *node.Node) (err error) { + if n.Type() != node.Branch { + return nil + } + + branch := n + for i, child := range branch.Children { + if child == nil { + continue + } + + merkleValue := child.HashDigest + encoding, ok := merkleValueToEncoding[string(merkleValue)] + if !ok { + inlinedChild := len(child.Value) > 0 || child.HasChild() + if !inlinedChild { + // hash not found and the child is not inlined, + // so clear the child from the branch. + branch.Descendants -= 1 + child.Descendants + branch.Children[i] = nil + if !branch.HasChild() { + // Convert branch to a leaf if all its children are nil. 
+ branch.Children = nil + } + } + continue + } + + child, err := node.Decode(bytes.NewReader(encoding)) + if err != nil { + return fmt.Errorf("decoding child node for Merkle value 0x%x: %w", + merkleValue, err) + } + + branch.Children[i] = child + branch.Descendants += child.Descendants + err = loadProof(merkleValueToEncoding, child) + if err != nil { + return err // do not wrap error since this is recursive + } + } + + return nil +} + +func bytesToString(b []byte) (s string) { + switch { + case b == nil: + return "nil" + case len(b) <= 20: + return fmt.Sprintf("0x%x", b) + default: + return fmt.Sprintf("0x%x...%x", b[:8], b[len(b)-8:]) + } +} diff --git a/lib/trie/proof/verify_test.go b/lib/trie/proof/verify_test.go new file mode 100644 index 0000000000..db3b346a1b --- /dev/null +++ b/lib/trie/proof/verify_test.go @@ -0,0 +1,628 @@ +// Copyright 2022 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package proof + +import ( + "testing" + + "github.com/ChainSafe/gossamer/internal/trie/node" + "github.com/ChainSafe/gossamer/lib/trie" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Verify(t *testing.T) { + t.Parallel() + + leafA := node.Node{ + Key: []byte{1}, + Value: []byte{1}, + } + + // leafB is a leaf encoding to more than 32 bytes + leafB := node.Node{ + Key: []byte{2}, + Value: generateBytes(t, 40), + } + assertLongEncoding(t, leafB) + + branch := node.Node{ + Key: []byte{3, 4}, + Value: []byte{1}, + Children: padRightChildren([]*node.Node{ + &leafB, + nil, + &leafA, + &leafB, + }), + } + assertLongEncoding(t, branch) + + testCases := map[string]struct { + encodedProofNodes [][]byte + rootHash []byte + keyLE []byte + value []byte + errWrapped error + errMessage string + }{ + "failed building proof trie": { + rootHash: []byte{1, 2, 3}, + errWrapped: ErrEmptyProof, + errMessage: "building trie from proof encoded nodes: " + + "proof slice empty: for Merkle root hash 0x010203", + }, + "value not found": { + encodedProofNodes: [][]byte{ + encodeNode(t, branch), + encodeNode(t, leafB), + // Note leaf A is small enough to be inlined in branch + }, + rootHash: blake2bNode(t, branch), + keyLE: []byte{1, 1}, // nil child of branch + errWrapped: ErrKeyNotFoundInProofTrie, + errMessage: "key not found in proof trie: " + + "0x0101 in proof trie for root hash " + + "0xec4bb0acfcf778ae8746d3ac3325fc73c3d9b376eb5f8d638dbf5eb462f5e703", + }, + "key found with nil search value": { + encodedProofNodes: [][]byte{ + encodeNode(t, branch), + encodeNode(t, leafB), + // Note leaf A is small enough to be inlined in branch + }, + rootHash: blake2bNode(t, branch), + keyLE: []byte{0x34, 0x21}, // inlined short leaf of branch + }, + "key found with mismatching value": { + encodedProofNodes: [][]byte{ + encodeNode(t, branch), + encodeNode(t, leafB), + // Note leaf A is small enough to be inlined in branch + }, + rootHash: blake2bNode(t, branch), + keyLE: []byte{0x34, 0x21}, // inlined short leaf of branch + value: []byte{2}, + errWrapped: ErrValueMismatchProofTrie, + errMessage: "value found in proof trie does not match: " + + "expected value 0x02 but got value 0x01 from proof trie", + }, + "key found with matching value": { + encodedProofNodes: [][]byte{ + encodeNode(t, branch), + encodeNode(t, leafB), + // Note leaf A is small enough to be inlined in branch + }, + rootHash: blake2bNode(t, branch), + keyLE: []byte{0x34, 0x32}, // large hash-referenced leaf of branch + value: generateBytes(t, 40), + }, + } + + for name, testCase := range testCases 
{ + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := Verify(testCase.encodedProofNodes, testCase.rootHash, testCase.keyLE, testCase.value) + + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + }) + } +} + +func Test_buildTrie(t *testing.T) { + t.Parallel() + + leafAShort := node.Node{ + Key: []byte{1}, + Value: []byte{2}, + } + assertShortEncoding(t, leafAShort) + + leafBLarge := node.Node{ + Key: []byte{2}, + Value: generateBytes(t, 40), + } + assertLongEncoding(t, leafBLarge) + + leafCLarge := node.Node{ + Key: []byte{3}, + Value: generateBytes(t, 40), + } + assertLongEncoding(t, leafCLarge) + + testCases := map[string]struct { + encodedProofNodes [][]byte + rootHash []byte + expectedTrie *trie.Trie + errWrapped error + errMessage string + }{ + "no proof node": { + errWrapped: ErrEmptyProof, + rootHash: []byte{1}, + errMessage: "proof slice empty: for Merkle root hash 0x01", + }, + "root node decoding error": { + encodedProofNodes: [][]byte{ + getBadNodeEncoding(), + }, + rootHash: blake2b(t, getBadNodeEncoding()), + errWrapped: node.ErrVariantUnknown, + errMessage: "decoding root node: decoding header: " + + "decoding header byte: node variant is unknown: " + + "for header byte 00000001", + }, + "root proof encoding smaller than 32 bytes": { + encodedProofNodes: [][]byte{ + encodeNode(t, leafAShort), + }, + rootHash: blake2bNode(t, leafAShort), + expectedTrie: trie.NewTrie(&node.Node{ + Key: leafAShort.Key, + Value: leafAShort.Value, + Dirty: true, + }), + }, + "root proof encoding larger than 32 bytes": { + encodedProofNodes: [][]byte{ + encodeNode(t, leafBLarge), + }, + rootHash: blake2bNode(t, leafBLarge), + expectedTrie: trie.NewTrie(&node.Node{ + Key: leafBLarge.Key, + Value: leafBLarge.Value, + Dirty: true, + }), + }, + "discard unused node": { + encodedProofNodes: [][]byte{ + encodeNode(t, leafAShort), + encodeNode(t, leafBLarge), + }, + rootHash: blake2bNode(t, leafAShort), + expectedTrie: trie.NewTrie(&node.Node{ + Key: leafAShort.Key, + Value: leafAShort.Value, + Dirty: true, + }), + }, + "multiple unordered nodes": { + encodedProofNodes: [][]byte{ + encodeNode(t, leafBLarge), // chilren 1 and 3 + encodeNode(t, node.Node{ // root + Key: []byte{1}, + Children: padRightChildren([]*node.Node{ + &leafAShort, // inlined + &leafBLarge, // referenced by Merkle value hash + &leafCLarge, // referenced by Merkle value hash + &leafBLarge, // referenced by Merkle value hash + }), + }), + encodeNode(t, leafCLarge), // children 2 + }, + rootHash: blake2bNode(t, node.Node{ + Key: []byte{1}, + Children: padRightChildren([]*node.Node{ + &leafAShort, + &leafBLarge, + &leafCLarge, + &leafBLarge, + }), + }), + expectedTrie: trie.NewTrie(&node.Node{ + Key: []byte{1}, + Descendants: 4, + Dirty: true, + Children: padRightChildren([]*node.Node{ + { + Key: leafAShort.Key, + Value: leafAShort.Value, + Dirty: true, + }, + { + Key: leafBLarge.Key, + Value: leafBLarge.Value, + Dirty: true, + }, + { + Key: leafCLarge.Key, + Value: leafCLarge.Value, + Dirty: true, + }, + { + Key: leafBLarge.Key, + Value: leafBLarge.Value, + Dirty: true, + }, + }), + }), + }, + "load proof decoding error": { + encodedProofNodes: [][]byte{ + getBadNodeEncoding(), + // root with one child pointing to hash of bad encoding above. 
+ concatBytes([][]byte{ + {0b1000_0000 | 0b0000_0001}, // branch with key size 1 + {1}, // key + {0b0000_0001, 0b0000_0000}, // children bitmap + scaleEncode(t, blake2b(t, getBadNodeEncoding())), // child hash + }), + }, + rootHash: blake2b(t, concatBytes([][]byte{ + {0b1000_0000 | 0b0000_0001}, // branch with key size 1 + {1}, // key + {0b0000_0001, 0b0000_0000}, // children bitmap + scaleEncode(t, blake2b(t, getBadNodeEncoding())), // child hash + })), + errWrapped: node.ErrVariantUnknown, + errMessage: "loading proof: decoding child node for Merkle value " + + "0xcfa21f0ec11a3658d77701b7b1f52fbcb783fe3df662977b6e860252b6c37e1e: " + + "decoding header: decoding header byte: " + + "node variant is unknown: for header byte 00000001", + }, + "root not found": { + encodedProofNodes: [][]byte{ + encodeNode(t, node.Node{ + Key: []byte{1}, + Value: []byte{2}, + }), + }, + rootHash: []byte{3}, + errWrapped: ErrRootNodeNotFound, + errMessage: "root node not found in proof: " + + "for Merkle root hash 0x03 in proof Merkle value(s) 0x41010402", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + trie, err := buildTrie(testCase.encodedProofNodes, testCase.rootHash) + + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + + if testCase.expectedTrie != nil { + require.NotNil(t, trie) + require.Equal(t, testCase.expectedTrie.String(), trie.String()) + } + assert.Equal(t, testCase.expectedTrie, trie) + }) + } +} + +func Test_loadProof(t *testing.T) { + t.Parallel() + + largeValue := generateBytes(t, 40) + + leafLarge := node.Node{ + Key: []byte{3}, + Value: largeValue, + } + assertLongEncoding(t, leafLarge) + + testCases := map[string]struct { + merkleValueToEncoding map[string][]byte + node *node.Node + expectedNode *node.Node + errWrapped error + errMessage string + }{ + "leaf node": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + }, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + }, + }, + "branch node with child hash not found": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{3}}, + }), + }, + merkleValueToEncoding: map[string][]byte{}, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Dirty: true, + }, + }, + "branch node with child hash found": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{2}}, + }), + }, + merkleValueToEncoding: map[string][]byte{ + string([]byte{2}): encodeNode(t, node.Node{ + Key: []byte{3}, + Value: []byte{1}, + }), + }, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{3}, + Value: []byte{1}, + Dirty: true, + }, + }), + }, + }, + "branch node with one child hash found and one not found": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 2, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{2}}, // found + {HashDigest: []byte{3}}, // not found + }), + }, + merkleValueToEncoding: map[string][]byte{ + string([]byte{2}): encodeNode(t, node.Node{ + Key: []byte{3}, + Value: []byte{1}, + }), + }, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + 
Children: padRightChildren([]*node.Node{ + { + Key: []byte{3}, + Value: []byte{1}, + Dirty: true, + }, + }), + }, + }, + "branch node with branch child hash": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 2, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{2}}, + }), + }, + merkleValueToEncoding: map[string][]byte{ + string([]byte{2}): encodeNode(t, node.Node{ + Key: []byte{3}, + Value: []byte{1}, + Children: padRightChildren([]*node.Node{ + {Key: []byte{4}, Value: []byte{2}}, + }), + }), + }, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 3, + Dirty: true, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{3}, + Value: []byte{1}, + Dirty: true, + Descendants: 1, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{4}, + Value: []byte{2}, + Dirty: true, + }, + }), + }, + }), + }, + }, + "child decoding error": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{2}}, + }), + }, + merkleValueToEncoding: map[string][]byte{ + string([]byte{2}): getBadNodeEncoding(), + }, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{2}}, + }), + }, + errWrapped: node.ErrVariantUnknown, + errMessage: "decoding child node for Merkle value 0x02: " + + "decoding header: decoding header byte: node variant is unknown: " + + "for header byte 00000001", + }, + "grand child": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{1}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{2}}, + }), + }, + merkleValueToEncoding: map[string][]byte{ + string([]byte{2}): encodeNode(t, node.Node{ + Key: []byte{2}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + &leafLarge, // encoded to hash + }), + }), + string(blake2bNode(t, leafLarge)): encodeNode(t, leafLarge), + }, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{1}, + Descendants: 2, + Dirty: true, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{2}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + { + Key: leafLarge.Key, + Value: leafLarge.Value, + Dirty: true, + }, + }), + }, + }), + }, + }, + + "grand child load proof error": { + node: &node.Node{ + Key: []byte{1}, + Value: []byte{1}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + {HashDigest: []byte{2}}, + }), + }, + merkleValueToEncoding: map[string][]byte{ + string([]byte{2}): encodeNode(t, node.Node{ + Key: []byte{2}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + &leafLarge, // encoded to hash + }), + }), + string(blake2bNode(t, leafLarge)): getBadNodeEncoding(), + }, + expectedNode: &node.Node{ + Key: []byte{1}, + Value: []byte{1}, + Descendants: 2, + Dirty: true, + Children: padRightChildren([]*node.Node{ + { + Key: []byte{2}, + Value: []byte{2}, + Descendants: 1, + Dirty: true, + Children: padRightChildren([]*node.Node{ + { + HashDigest: blake2bNode(t, leafLarge), + Dirty: true, + }, + }), + }, + }), + }, + errWrapped: node.ErrVariantUnknown, + errMessage: "decoding child node for Merkle value " + + "0x6888b9403129c11350c6054b46875292c0ffedcfd581e66b79bdf350b775ebf2: " + + "decoding header: decoding header byte: node variant is 
unknown: " + + "for header byte 00000001", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := loadProof(testCase.merkleValueToEncoding, testCase.node) + + assert.ErrorIs(t, err, testCase.errWrapped) + if testCase.errWrapped != nil { + assert.EqualError(t, err, testCase.errMessage) + } + + assert.Equal(t, testCase.expectedNode.String(), testCase.node.String()) + }) + } +} + +func Test_bytesToString(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + b []byte + s string + }{ + "nil slice": { + s: "nil", + }, + "empty slice": { + b: []byte{}, + s: "0x", + }, + "small slice": { + b: []byte{1, 2, 3}, + s: "0x010203", + }, + "big slice": { + b: []byte{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + }, + s: "0x0001020304050607...0203040506070809", + }, + } + + for name, testCase := range testCases { + testCase := testCase + t.Run(name, func(t *testing.T) { + t.Parallel() + + s := bytesToString(testCase.b) + + assert.Equal(t, testCase.s, s) + }) + } +} diff --git a/lib/trie/proof_test.go b/lib/trie/proof_test.go deleted file mode 100644 index 78d58a1675..0000000000 --- a/lib/trie/proof_test.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package trie - -import ( - "testing" - - "github.com/ChainSafe/chaindb" - "github.com/stretchr/testify/require" -) - -func TestProofGeneration(t *testing.T) { - t.Parallel() - - tmp := t.TempDir() - - memdb, err := chaindb.NewBadgerDB(&chaindb.Config{ - InMemory: true, - DataDir: tmp, - }) - require.NoError(t, err) - - const size = 32 - generator := newGenerator() - - expectedValue := generateRandBytes(t, size, generator) - - trie := NewEmptyTrie() - trie.Put([]byte("cat"), generateRandBytes(t, size, generator)) - trie.Put([]byte("catapulta"), generateRandBytes(t, size, generator)) - trie.Put([]byte("catapora"), expectedValue) - trie.Put([]byte("dog"), generateRandBytes(t, size, generator)) - trie.Put([]byte("doguinho"), generateRandBytes(t, size, generator)) - - err = trie.Store(memdb) - require.NoError(t, err) - - hash, err := trie.Hash() - require.NoError(t, err) - - proof, err := GenerateProof(hash.ToBytes(), [][]byte{[]byte("catapulta"), []byte("catapora")}, memdb) - require.NoError(t, err) - - require.Equal(t, 5, len(proof)) - - pl := []Pair{ - {Key: []byte("catapora"), Value: expectedValue}, - } - - v, err := VerifyProof(proof, hash.ToBytes(), pl) - require.True(t, v) - require.NoError(t, err) -} - -func testGenerateProof(t *testing.T, entries []Pair, keys [][]byte) ([]byte, [][]byte, []Pair) { - t.Helper() - - tmp := t.TempDir() - - memdb, err := chaindb.NewBadgerDB(&chaindb.Config{ - InMemory: true, - DataDir: tmp, - }) - require.NoError(t, err) - - trie := NewEmptyTrie() - for _, e := range entries { - trie.Put(e.Key, e.Value) - } - - err = trie.Store(memdb) - require.NoError(t, err) - - root := trie.root.HashDigest - proof, err := GenerateProof(root, keys, memdb) - require.NoError(t, err) - - items := make([]Pair, len(keys)) - for idx, key := range keys { - value := trie.Get(key) - require.NotNil(t, value) - - items[idx] = Pair{ - Key: key, - Value: value, - } - } - - return root, proof, items -} - -func TestVerifyProof_ShouldReturnTrue(t *testing.T) { - t.Parallel() - - entries := []Pair{ - {Key: []byte("alpha"), Value: make([]byte, 32)}, - {Key: []byte("bravo"), Value: []byte("bravo")}, - {Key: []byte("do"), Value: 
[]byte("verb")}, - {Key: []byte("dogea"), Value: []byte("puppy")}, - {Key: []byte("dogeb"), Value: []byte("puppy")}, - {Key: []byte("horse"), Value: []byte("stallion")}, - {Key: []byte("house"), Value: []byte("building")}, - } - - keys := [][]byte{ - []byte("do"), - []byte("dogea"), - []byte("dogeb"), - } - - root, proof, pairs := testGenerateProof(t, entries, keys) - v, err := VerifyProof(proof, root, pairs) - - require.NoError(t, err) - require.True(t, v) -} - -func TestVerifyProof_ShouldReturnDuplicateKeysError(t *testing.T) { - t.Parallel() - - pl := []Pair{ - {Key: []byte("do"), Value: []byte("verb")}, - {Key: []byte("do"), Value: []byte("puppy")}, - } - - v, err := VerifyProof([][]byte{}, []byte{}, pl) - require.False(t, v) - require.Error(t, err, ErrDuplicateKeys) -} - -func TestVerifyProof_ShouldReturnTrueWithouCompareValues(t *testing.T) { - t.Parallel() - - entries := []Pair{ - {Key: []byte("alpha"), Value: make([]byte, 32)}, - {Key: []byte("bravo"), Value: []byte("bravo")}, - {Key: []byte("do"), Value: []byte("verb")}, - {Key: []byte("dog"), Value: []byte("puppy")}, - {Key: []byte("doge"), Value: make([]byte, 32)}, - {Key: []byte("horse"), Value: []byte("stallion")}, - {Key: []byte("house"), Value: []byte("building")}, - } - - keys := [][]byte{ - []byte("do"), - []byte("dog"), - []byte("doge"), - } - - root, proof, _ := testGenerateProof(t, entries, keys) - - pl := []Pair{ - {Key: []byte("do"), Value: nil}, - {Key: []byte("dog"), Value: nil}, - {Key: []byte("doge"), Value: nil}, - } - - v, err := VerifyProof(proof, root, pl) - require.True(t, v) - require.NoError(t, err) -} - -func TestBranchNodes_SameHash_DifferentPaths_GenerateAndVerifyProof(t *testing.T) { - value := []byte("somevalue") - entries := []Pair{ - {Key: []byte("d"), Value: value}, - {Key: []byte("b"), Value: value}, - {Key: []byte("dxyz"), Value: value}, - {Key: []byte("bxyz"), Value: value}, - {Key: []byte("dxyzi"), Value: value}, - {Key: []byte("bxyzi"), Value: value}, - } - - keys := [][]byte{ - []byte("d"), - []byte("b"), - []byte("dxyz"), - []byte("bxyz"), - []byte("dxyzi"), - []byte("bxyzi"), - } - - root, proof, pairs := testGenerateProof(t, entries, keys) - - ok, err := VerifyProof(proof, root, pairs) - require.NoError(t, err) - require.True(t, ok) -} - -func TestLeafNodes_SameHash_DifferentPaths_GenerateAndVerifyProof(t *testing.T) { - tmp := t.TempDir() - - memdb, err := chaindb.NewBadgerDB(&chaindb.Config{ - InMemory: true, - DataDir: tmp, - }) - require.NoError(t, err) - - var ( - value = []byte("somevalue") - key1 = []byte("worlda") - key2 = []byte("worldb") - ) - - tt := NewEmptyTrie() - tt.Put(key1, value) - tt.Put(key2, value) - - err = tt.Store(memdb) - require.NoError(t, err) - - hash, err := tt.Hash() - require.NoError(t, err) - - proof, err := GenerateProof(hash.ToBytes(), [][]byte{key1, key2}, memdb) - require.NoError(t, err) - - pairs := []Pair{ - {Key: key1, Value: value}, - {Key: key2, Value: value}, - } - - ok, err := VerifyProof(proof, hash.ToBytes(), pairs) - require.NoError(t, err) - require.True(t, ok) -} From 191f77086c710fa6dd4b89c7a8ed4e357ee335cf Mon Sep 17 00:00:00 2001 From: Kishan Sagathiya Date: Thu, 7 Jul 2022 09:47:07 +0530 Subject: [PATCH 39/48] Make sure message that we receive are below their threshold sizes (#2628) Fixes #2399 --- dot/network/helpers_test.go | 2 +- dot/network/inbound.go | 5 +++-- dot/network/light.go | 2 +- dot/network/notifications.go | 11 +++++++---- dot/network/notifications_test.go | 4 ++-- dot/network/service.go | 7 +++++-- dot/network/sync.go | 7 
+++---- dot/network/utils.go | 16 +++++++++++++--- lib/grandpa/mocks/network.go | 10 +++++----- lib/grandpa/network.go | 1 + lib/grandpa/round_test.go | 1 + lib/grandpa/state.go | 1 + 12 files changed, 43 insertions(+), 24 deletions(-) diff --git a/dot/network/helpers_test.go b/dot/network/helpers_test.go index 378db3f4c3..fb5d5f8d1e 100644 --- a/dot/network/helpers_test.go +++ b/dot/network/helpers_test.go @@ -85,7 +85,7 @@ func (s *testStreamHandler) readStream(stream libp2pnetwork.Stream, }() for { - tot, err := readStream(stream, &msgBytes) + tot, err := readStream(stream, &msgBytes, maxBlockResponseSize) if errors.Is(err, io.EOF) { return } else if err != nil { diff --git a/dot/network/inbound.go b/dot/network/inbound.go index 9d673a6f8e..bd0a1674ee 100644 --- a/dot/network/inbound.go +++ b/dot/network/inbound.go @@ -7,7 +7,8 @@ import ( libp2pnetwork "github.com/libp2p/go-libp2p-core/network" ) -func (s *Service) readStream(stream libp2pnetwork.Stream, decoder messageDecoder, handler messageHandler) { +func (s *Service) readStream(stream libp2pnetwork.Stream, decoder messageDecoder, handler messageHandler, + maxSize uint64) { // we NEED to reset the stream if we ever return from this function, as if we return, // the stream will never again be read by us, so we need to tell the remote side we're // done with this stream, and they should also forget about it. @@ -19,7 +20,7 @@ func (s *Service) readStream(stream libp2pnetwork.Stream, decoder messageDecoder defer s.bufPool.Put(buffer) for { - n, err := readStream(stream, buffer) + n, err := readStream(stream, buffer, maxSize) if err != nil { logger.Tracef( "failed to read from stream id %s of peer %s using protocol %s: %s", diff --git a/dot/network/light.go b/dot/network/light.go index d782568133..15a009194d 100644 --- a/dot/network/light.go +++ b/dot/network/light.go @@ -16,7 +16,7 @@ import ( // handleLightStream handles streams with the /light/2 protocol ID func (s *Service) handleLightStream(stream libp2pnetwork.Stream) { - s.readStream(stream, s.decodeLightMessage, s.handleLightMsg) + s.readStream(stream, s.decodeLightMessage, s.handleLightMsg, maxBlockResponseSize) } func (s *Service) decodeLightMessage(in []byte, peer peer.ID, _ bool) (Message, error) { diff --git a/dot/network/notifications.go b/dot/network/notifications.go index 758c97b99d..38fa9fa731 100644 --- a/dot/network/notifications.go +++ b/dot/network/notifications.go @@ -65,16 +65,18 @@ type notificationsProtocol struct { handshakeDecoder HandshakeDecoder handshakeValidator HandshakeValidator peersData *peersData + maxSize uint64 } func newNotificationsProtocol(protocolID protocol.ID, handshakeGetter HandshakeGetter, - handshakeDecoder HandshakeDecoder, handshakeValidator HandshakeValidator) *notificationsProtocol { + handshakeDecoder HandshakeDecoder, handshakeValidator HandshakeValidator, maxSize uint64) *notificationsProtocol { return ¬ificationsProtocol{ protocolID: protocolID, getHandshake: handshakeGetter, handshakeValidator: handshakeValidator, handshakeDecoder: handshakeDecoder, peersData: newPeersData(), + maxSize: maxSize, } } @@ -350,7 +352,7 @@ func (s *Service) sendHandshake(peer peer.ID, hs Handshake, info *notificationsP logger.Tracef("handshake timeout reached for peer %s using protocol %s", peer, info.protocolID) closeOutboundStream(info, peer, stream) return nil, errHandshakeTimeout - case hsResponse := <-s.readHandshake(stream, info.handshakeDecoder): + case hsResponse := <-s.readHandshake(stream, info.handshakeDecoder, info.maxSize): if 
!hsTimer.Stop() { <-hsTimer.C } @@ -411,7 +413,8 @@ func (s *Service) broadcastExcluding(info *notificationsProtocol, excluding peer } } -func (s *Service) readHandshake(stream libp2pnetwork.Stream, decoder HandshakeDecoder) <-chan *handshakeReader { +func (s *Service) readHandshake(stream libp2pnetwork.Stream, decoder HandshakeDecoder, maxSize uint64, +) <-chan *handshakeReader { hsC := make(chan *handshakeReader) go func() { @@ -420,7 +423,7 @@ func (s *Service) readHandshake(stream libp2pnetwork.Stream, decoder HandshakeDe buffer := s.bufPool.Get().(*[]byte) defer s.bufPool.Put(buffer) - tot, err := readStream(stream, buffer) + tot, err := readStream(stream, buffer, maxSize) if err != nil { hsC <- &handshakeReader{hs: nil, err: err} return diff --git a/dot/network/notifications_test.go b/dot/network/notifications_test.go index 172f212c21..c4ad87a5f8 100644 --- a/dot/network/notifications_test.go +++ b/dot/network/notifications_test.go @@ -247,8 +247,8 @@ func Test_HandshakeTimeout(t *testing.T) { testHandshakeDecoder := func([]byte) (Handshake, error) { return nil, errors.New("unimplemented") } - info := newNotificationsProtocol(nodeA.host.protocolID+blockAnnounceID, - nodeA.getBlockAnnounceHandshake, testHandshakeDecoder, nodeA.validateBlockAnnounceHandshake) + info := newNotificationsProtocol(nodeA.host.protocolID+blockAnnounceID, nodeA.getBlockAnnounceHandshake, + testHandshakeDecoder, nodeA.validateBlockAnnounceHandshake, maxBlockAnnounceNotificationSize) nodeB.host.p2pHost.SetStreamHandler(info.protocolID, func(stream libp2pnetwork.Stream) { // should not respond to a handshake message diff --git a/dot/network/service.go b/dot/network/service.go index 6374bdb9ea..5bb7e72518 100644 --- a/dot/network/service.go +++ b/dot/network/service.go @@ -251,6 +251,7 @@ func (s *Service) Start() error { decodeBlockAnnounceMessage, s.handleBlockAnnounceMessage, nil, + maxBlockAnnounceNotificationSize, ) if err != nil { logger.Warnf("failed to register notifications protocol with block announce id %s: %s", @@ -270,6 +271,7 @@ func (s *Service) Start() error { decodeTransactionMessage, s.handleTransactionMessage, txnBatchHandler, + maxTransactionsNotificationSize, ) if err != nil { logger.Warnf("failed to register notifications protocol with transaction id %s: %s", transactionsID, err) @@ -514,6 +516,7 @@ func (s *Service) RegisterNotificationsProtocol( messageDecoder MessageDecoder, messageHandler NotificationsMessageHandler, batchHandler NotificationsMessageBatchHandler, + maxSize uint64, ) error { s.notificationsMu.Lock() defer s.notificationsMu.Unlock() @@ -522,14 +525,14 @@ func (s *Service) RegisterNotificationsProtocol( return errors.New("notifications protocol with message type already exists") } - np := newNotificationsProtocol(protocolID, handshakeGetter, handshakeDecoder, handshakeValidator) + np := newNotificationsProtocol(protocolID, handshakeGetter, handshakeDecoder, handshakeValidator, maxSize) s.notificationsProtocols[messageID] = np decoder := createDecoder(np, handshakeDecoder, messageDecoder) handlerWithValidate := s.createNotificationsMessageHandler(np, messageHandler, batchHandler) s.host.registerStreamHandler(protocolID, func(stream libp2pnetwork.Stream) { logger.Tracef("received stream using sub-protocol %s", protocolID) - s.readStream(stream, decoder, handlerWithValidate) + s.readStream(stream, decoder, handlerWithValidate, maxSize) }) logger.Infof("registered notifications sub-protocol %s", protocolID) diff --git a/dot/network/sync.go b/dot/network/sync.go index 
856447254e..dad164d94d 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -14,8 +14,7 @@ import ( ) var ( - maxBlockResponseSize uint64 = 1024 * 1024 * 4 // 4mb - blockRequestTimeout = time.Second * 5 + blockRequestTimeout = time.Second * 20 ) // DoBlockRequest sends a request to the given peer. @@ -58,7 +57,7 @@ func (s *Service) receiveBlockResponse(stream libp2pnetwork.Stream) (*BlockRespo buf := s.blockResponseBuf - n, err := readStream(stream, &buf) + n, err := readStream(stream, &buf, maxBlockResponseSize) if err != nil { return nil, fmt.Errorf("read stream error: %w", err) } @@ -86,7 +85,7 @@ func (s *Service) handleSyncStream(stream libp2pnetwork.Stream) { return } - s.readStream(stream, decodeSyncMessage, s.handleSyncMessage) + s.readStream(stream, decodeSyncMessage, s.handleSyncMessage, maxBlockResponseSize) } func decodeSyncMessage(in []byte, _ peer.ID, _ bool) (Message, error) { diff --git a/dot/network/utils.go b/dot/network/utils.go index 964685bf42..b98c82eb12 100644 --- a/dot/network/utils.go +++ b/dot/network/utils.go @@ -20,6 +20,16 @@ import ( "github.com/multiformats/go-multiaddr" ) +const ( + // maxBlockRequestSize uint64 = 1024 * 1024 // 1mb + maxBlockResponseSize uint64 = 1024 * 1024 * 16 // 16mb + // MaxGrandpaNotificationSize is the maximum size for a grandpa notification message. + MaxGrandpaNotificationSize uint64 = 1024 * 1024 // 1mb + maxTransactionsNotificationSize uint64 = 1024 * 1024 * 16 // 16mb + maxBlockAnnounceNotificationSize uint64 = 1024 * 1024 // 1mb + +) + func isInbound(stream libp2pnetwork.Stream) bool { return stream.Stat().Direction == libp2pnetwork.DirInbound } @@ -176,7 +186,7 @@ func readLEB128ToUint64(r io.Reader, buf []byte) (uint64, int, error) { } // readStream reads from the stream into the given buffer, returning the number of bytes read -func readStream(stream libp2pnetwork.Stream, bufPointer *[]byte) (int, error) { +func readStream(stream libp2pnetwork.Stream, bufPointer *[]byte, maxSize uint64) (int, error) { if stream == nil { return 0, errors.New("stream is nil") } @@ -201,8 +211,8 @@ func readStream(stream libp2pnetwork.Stream, bufPointer *[]byte) (int, error) { logger.Warnf("received message with size %d greater than allocated message buffer size %d", length, len(buf)) } - if length > maxBlockResponseSize { - logger.Warnf("received message with size %d greater than maxBlockResponseSize, closing stream", length) + if length > maxSize { + logger.Warnf("received message with size %d greater than max size %d, closing stream", length, maxSize) return 0, fmt.Errorf("message size greater than maximum: got %d", length) } diff --git a/lib/grandpa/mocks/network.go b/lib/grandpa/mocks/network.go index 1dc56facd9..25c5a84a5c 100644 --- a/lib/grandpa/mocks/network.go +++ b/lib/grandpa/mocks/network.go @@ -21,13 +21,13 @@ func (_m *Network) GossipMessage(msg network.NotificationsMessage) { _m.Called(msg) } -// RegisterNotificationsProtocol provides a mock function with given fields: sub, messageID, handshakeGetter, handshakeDecoder, handshakeValidator, messageDecoder, messageHandler, batchHandler -func (_m *Network) RegisterNotificationsProtocol(sub protocol.ID, messageID byte, handshakeGetter func() (network.Handshake, error), handshakeDecoder func([]byte) (network.Handshake, error), handshakeValidator func(peer.ID, network.Handshake) error, messageDecoder func([]byte) (network.NotificationsMessage, error), messageHandler func(peer.ID, network.NotificationsMessage) (bool, error), batchHandler func(peer.ID,
network.NotificationsMessage)) error { - ret := _m.Called(sub, messageID, handshakeGetter, handshakeDecoder, handshakeValidator, messageDecoder, messageHandler, batchHandler) +// RegisterNotificationsProtocol provides a mock function with given fields: sub, messageID, handshakeGetter, handshakeDecoder, handshakeValidator, messageDecoder, messageHandler, batchHandler, maxSize +func (_m *Network) RegisterNotificationsProtocol(sub protocol.ID, messageID byte, handshakeGetter func() (network.Handshake, error), handshakeDecoder func([]byte) (network.Handshake, error), handshakeValidator func(peer.ID, network.Handshake) error, messageDecoder func([]byte) (network.NotificationsMessage, error), messageHandler func(peer.ID, network.NotificationsMessage) (bool, error), batchHandler func(peer.ID, network.NotificationsMessage), maxSize uint64) error { + ret := _m.Called(sub, messageID, handshakeGetter, handshakeDecoder, handshakeValidator, messageDecoder, messageHandler, batchHandler, maxSize) var r0 error - if rf, ok := ret.Get(0).(func(protocol.ID, byte, func() (network.Handshake, error), func([]byte) (network.Handshake, error), func(peer.ID, network.Handshake) error, func([]byte) (network.NotificationsMessage, error), func(peer.ID, network.NotificationsMessage) (bool, error), func(peer.ID, network.NotificationsMessage)) error); ok { - r0 = rf(sub, messageID, handshakeGetter, handshakeDecoder, handshakeValidator, messageDecoder, messageHandler, batchHandler) + if rf, ok := ret.Get(0).(func(protocol.ID, byte, func() (network.Handshake, error), func([]byte) (network.Handshake, error), func(peer.ID, network.Handshake) error, func([]byte) (network.NotificationsMessage, error), func(peer.ID, network.NotificationsMessage) (bool, error), func(peer.ID, network.NotificationsMessage), uint64) error); ok { + r0 = rf(sub, messageID, handshakeGetter, handshakeDecoder, handshakeValidator, messageDecoder, messageHandler, batchHandler, maxSize) } else { r0 = ret.Error(0) } diff --git a/lib/grandpa/network.go b/lib/grandpa/network.go index 5caaaf91c5..50fffe956e 100644 --- a/lib/grandpa/network.go +++ b/lib/grandpa/network.go @@ -83,6 +83,7 @@ func (s *Service) registerProtocol() error { s.decodeMessage, s.handleNetworkMessage, nil, + network.MaxGrandpaNotificationSize, ) } diff --git a/lib/grandpa/round_test.go b/lib/grandpa/round_test.go index 2ed107a4cb..efca476147 100644 --- a/lib/grandpa/round_test.go +++ b/lib/grandpa/round_test.go @@ -80,6 +80,7 @@ func (*testNetwork) RegisterNotificationsProtocol( _ network.MessageDecoder, _ network.NotificationsMessageHandler, _ network.NotificationsMessageBatchHandler, + _ uint64, ) error { return nil } diff --git a/lib/grandpa/state.go b/lib/grandpa/state.go index 15f85f7722..cb2fd4b1e9 100644 --- a/lib/grandpa/state.go +++ b/lib/grandpa/state.go @@ -68,5 +68,6 @@ type Network interface { messageDecoder network.MessageDecoder, messageHandler network.NotificationsMessageHandler, batchHandler network.NotificationsMessageBatchHandler, + maxSize uint64, ) error } From 2bb260928d635505060d1a351ce3d8654158645a Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Thu, 7 Jul 2022 13:56:57 -0400 Subject: [PATCH 40/48] chore(ci): only run Fuzz test in fuzz workflow (#2586) --- .github/workflows/fuzz.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index fdc113e06e..5c594736e3 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -36,4 +36,4 @@ jobs: restore-keys: ${{ runner.os }}-go-mod 
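For context on the workflow change below: `go test -fuzz=X` first runs the package's regular unit tests before fuzzing X, so adding `-run Fuzz_Trie_PutAndGet` restricts that initial phase to the fuzz target itself, leaving the workflow's time entirely for fuzzing. A Go fuzz target of the kind selected by these flags has the following shape; this is a minimal illustrative sketch, not the actual Fuzz_Trie_PutAndGet implementation:

    package trie

    import "testing"

    // Fuzz_Example is a hypothetical target: go test's -fuzz and -run flags
    // match test functions with the Fuzz prefix and a *testing.F parameter.
    func Fuzz_Example(f *testing.F) {
    	f.Add([]byte("seed")) // seed corpus entry, also run during the -run phase
    	f.Fuzz(func(t *testing.T, data []byte) {
    		// Property under test: for the trie, roughly "a value put under
    		// a key must be returned unchanged by a subsequent get".
    		_ = data
    	})
    }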
- name: Fuzz trie - run: go test -fuzz=Fuzz_Trie_PutAndGet -fuzztime=5m github.com/ChainSafe/gossamer/lib/trie + run: go test -run Fuzz_Trie_PutAndGet -fuzz=Fuzz_Trie_PutAndGet -fuzztime=5m github.com/ChainSafe/gossamer/lib/trie From 422e7b3a6afcaac1f1b69617843c188438f6873f Mon Sep 17 00:00:00 2001 From: Edward Mack Date: Fri, 8 Jul 2022 15:59:02 -0400 Subject: [PATCH 41/48] fix(lib/grandpa): Duplicate votes in GRANDPA are counted as equivocatory votes (GSR-11) (#2624) --- lib/grandpa/errors.go | 1 - lib/grandpa/message_handler.go | 33 ++-- lib/grandpa/message_handler_test.go | 240 ++++++++++++++++++++++++---- 3 files changed, 217 insertions(+), 57 deletions(-) diff --git a/lib/grandpa/errors.go b/lib/grandpa/errors.go index c2d2435c29..478c368b5f 100644 --- a/lib/grandpa/errors.go +++ b/lib/grandpa/errors.go @@ -99,5 +99,4 @@ var ( errVoteToSignatureMismatch = errors.New("votes and authority count mismatch") errVoteBlockMismatch = errors.New("block in vote is not descendant of previously finalised block") errVoteFromSelf = errors.New("got vote from ourselves") - errInvalidMultiplicity = errors.New("more than two equivocatory votes for a voter") ) diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index 79dc9b101c..6210b96d41 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -284,22 +284,20 @@ func (h *MessageHandler) verifyCatchUpResponseCompletability(prevote, precommit return nil } -func getEquivocatoryVoters(votes []AuthData) (map[ed25519.PublicKeyBytes]struct{}, error) { +func getEquivocatoryVoters(votes []AuthData) map[ed25519.PublicKeyBytes]struct{} { eqvVoters := make(map[ed25519.PublicKeyBytes]struct{}) - voters := make(map[ed25519.PublicKeyBytes]int, len(votes)) + voters := make(map[ed25519.PublicKeyBytes][64]byte, len(votes)) for _, v := range votes { - voters[v.AuthorityID]++ - switch voters[v.AuthorityID] { - case 1: - case 2: + signature, present := voters[v.AuthorityID] + if present && !bytes.Equal(signature[:], v.Signature[:]) { eqvVoters[v.AuthorityID] = struct{}{} - default: - return nil, fmt.Errorf("%w: authority id %x has %d votes", - errInvalidMultiplicity, v.AuthorityID, voters[v.AuthorityID]) + } else { + voters[v.AuthorityID] = v.Signature } } - return eqvVoters, nil + + return eqvVoters } func isDescendantOfHighestFinalisedBlock(blockState BlockState, hash common.Hash) (bool, error) { @@ -329,10 +327,7 @@ func (h *MessageHandler) verifyCommitMessageJustification(fm *CommitMessage) err return errVoteBlockMismatch } - eqvVoters, err := getEquivocatoryVoters(fm.AuthData) - if err != nil { - return fmt.Errorf("could not get valid equivocatory voters: %w", err) - } + eqvVoters := getEquivocatoryVoters(fm.AuthData) var count int for i, pc := range fm.Precommits { @@ -465,10 +460,7 @@ func (h *MessageHandler) verifyPreCommitJustification(msg *CatchUpResponse) erro return errVoteBlockMismatch } - eqvVoters, err := getEquivocatoryVoters(auths) - if err != nil { - return fmt.Errorf("could not get valid equivocatory voters: %w", err) - } + eqvVoters := getEquivocatoryVoters(auths) // verify pre-commit justification var count uint64 @@ -608,10 +600,7 @@ func (s *Service) VerifyBlockJustification(hash common.Hash, justification []byt authPubKeys[i] = AuthData{AuthorityID: pcj.AuthorityID} } - equivocatoryVoters, err := getEquivocatoryVoters(authPubKeys) - if err != nil { - return nil, fmt.Errorf("could not get valid equivocatory voters: %w", err) - } + equivocatoryVoters := getEquivocatoryVoters(authPubKeys) var count
int diff --git a/lib/grandpa/message_handler_test.go b/lib/grandpa/message_handler_test.go index 0f378b7817..b472ce4d25 100644 --- a/lib/grandpa/message_handler_test.go +++ b/lib/grandpa/message_handler_test.go @@ -807,44 +807,216 @@ func TestMessageHandler_VerifyBlockJustification_invalid(t *testing.T) { } func Test_getEquivocatoryVoters(t *testing.T) { - // many of equivocatory votes + t.Parallel() + ed25519Keyring, err := keystore.NewEd25519Keyring() require.NoError(t, err) - fakeAuthorities := []*ed25519.Keypair{ - ed25519Keyring.Alice().(*ed25519.Keypair), - ed25519Keyring.Alice().(*ed25519.Keypair), - ed25519Keyring.Bob().(*ed25519.Keypair), - ed25519Keyring.Charlie().(*ed25519.Keypair), - ed25519Keyring.Charlie().(*ed25519.Keypair), - ed25519Keyring.Dave().(*ed25519.Keypair), - ed25519Keyring.Dave().(*ed25519.Keypair), - ed25519Keyring.Eve().(*ed25519.Keypair), - ed25519Keyring.Ferdie().(*ed25519.Keypair), - ed25519Keyring.Heather().(*ed25519.Keypair), - ed25519Keyring.Heather().(*ed25519.Keypair), - ed25519Keyring.Ian().(*ed25519.Keypair), - ed25519Keyring.Ian().(*ed25519.Keypair), + tests := map[string]struct { + votes []AuthData + want map[ed25519.PublicKeyBytes]struct{} + }{ + "no votes": { + votes: []AuthData{}, + want: map[ed25519.PublicKeyBytes]struct{}{}, + }, + "one vote": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{}, + }, + "two votes different authorities": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{}, + }, + "duplicate votes": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{}, + }, + "equivocatory vote": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{ + ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(): {}, + }, + }, + "equivocatory vote with duplicate": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{ + ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(): {}, + }, + }, + "three voters one equivocatory": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: 
ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + { + AuthorityID: ed25519Keyring.Charlie().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{ + ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(): {}, + }, + }, + "three voters one equivocatory one duplicate": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + { + AuthorityID: ed25519Keyring.Charlie().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{ + ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(): {}, + }, + }, + "three voters two equivocatory": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + { + AuthorityID: ed25519Keyring.Charlie().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{ + ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(): {}, + ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(): {}, + }, + }, + "three voters two duplicate": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Charlie().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{}, + }, + "three voters": { + votes: []AuthData{ + { + AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Bob().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{1, 2, 3, 4}, + }, + { + AuthorityID: ed25519Keyring.Charlie().Public().(*ed25519.PublicKey).AsBytes(), + Signature: [64]byte{5, 6, 7, 8}, + }, + }, + want: map[ed25519.PublicKeyBytes]struct{}{}, + }, } - - authData := make([]AuthData, len(fakeAuthorities)) - - for i, auth := range fakeAuthorities { - authData[i] = AuthData{ - AuthorityID: auth.Public().(*ed25519.PublicKey).AsBytes(), - } + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + got := 
getEquivocatoryVoters(tt.votes) + assert.Equalf(t, tt.want, got, "getEquivocatoryVoters(%v)", tt.votes) + }) + } - - eqv, err := getEquivocatoryVoters(authData) - require.NoError(t, err) - require.Len(t, eqv, 5) - - // test that getEquivocatoryVoters returns an error if a voter has more than two equivocatory votes - authData = append(authData, AuthData{ - AuthorityID: ed25519Keyring.Alice().Public().(*ed25519.PublicKey).AsBytes(), - }) - - _, err = getEquivocatoryVoters(authData) - require.ErrorIs(t, err, errInvalidMultiplicity) } func Test_VerifyCommitMessageJustification_ShouldRemoveEquivocatoryVotes(t *testing.T) { From 78c03b69b4702c2b724d2306517c04037f1206b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ecl=C3=A9sio=20Junior?= Date: Fri, 8 Jul 2022 16:10:35 -0400 Subject: [PATCH 42/48] fix(lib/babe): ensure the slot time is correct before building a block (#2648) * fix: ensure the slot time is correct before building a block * chore: sleep is inevitable --- lib/babe/epoch_handler.go | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/lib/babe/epoch_handler.go b/lib/babe/epoch_handler.go index 0e78d6c996..b2cde8af8c 100644 --- a/lib/babe/epoch_handler.go +++ b/lib/babe/epoch_handler.go @@ -78,8 +78,9 @@ func (h *epochHandler) run(ctx context.Context, errCh chan<- error) { authoringSlots := getAuthoringSlots(h.slotToPreRuntimeDigest) type slotWithTimer struct { - timer *time.Timer - slotNum uint64 + startTime time.Time + timer *time.Timer + slotNum uint64 } slotTimeTimers := make([]*slotWithTimer, 0, len(authoringSlots)) @@ -90,10 +91,15 @@ func (h *epochHandler) run(ctx context.Context, errCh chan<- error) { } startTime := getSlotStartTime(authoringSlot, h.constants.slotDuration) + waitTime := startTime.Sub(time.Now()) + timer := time.NewTimer(waitTime) + slotTimeTimers = append(slotTimeTimers, &slotWithTimer{ - timer: time.NewTimer(time.Until(startTime)), - slotNum: authoringSlot, + timer: timer, + slotNum: authoringSlot, + startTime: startTime, }) + logger.Debugf("start time of slot %d: %v", authoringSlot, startTime) } @@ -115,6 +121,15 @@ func (h *epochHandler) run(ctx context.Context, errCh chan<- error) { case <-ctx.Done(): return case <-swt.timer.C: + // we must do a time correction as the slot timer sometimes fires + // before the scheduled slot start time, due to a Go runtime timer + // inaccuracy -> https://github.com/golang/go/issues/17696 + + diff := time.Since(swt.startTime) + if diff < 0 { + time.Sleep(-diff) + } + if _, has := h.slotToPreRuntimeDigest[swt.slotNum]; !has { + // this should never happen + panic(fmt.Sprintf("no VRF proof for authoring slot!
slot=%d", swt.slotNum)) From 8fd21882b62789c07cf19b05e176d4ad542a8314 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Mon, 11 Jul 2022 08:57:25 -0400 Subject: [PATCH 43/48] chore(lib/runtime/wasmer): import functions in a for loop (#2650) - Change `ImportsNodeRuntime` to be a for loop of import appends - Change deprecated `.Append` to `.AppendFunction` - Update function comment - Sort alphabetically appends --- lib/runtime/wasmer/imports.go | 442 ++++++++-------------------------- 1 file changed, 97 insertions(+), 345 deletions(-) diff --git a/lib/runtime/wasmer/imports.go b/lib/runtime/wasmer/imports.go index 509c5a3e34..0d9d8539ac 100644 --- a/lib/runtime/wasmer/imports.go +++ b/lib/runtime/wasmer/imports.go @@ -2230,351 +2230,103 @@ func toWasmMemoryFixedSizeOptional(context wasm.InstanceContext, data []byte) (i return toWasmMemory(context, enc) } -// ImportsNodeRuntime returns the imports for the v0.8 runtime -func ImportsNodeRuntime() (*wasm.Imports, error) { //nolint:gocyclo - var err error - - imports := wasm.NewImports() - - _, err = imports.Append("ext_allocator_free_version_1", ext_allocator_free_version_1, C.ext_allocator_free_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_allocator_malloc_version_1", ext_allocator_malloc_version_1, C.ext_allocator_malloc_version_1) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_crypto_ed25519_generate_version_1", ext_crypto_ed25519_generate_version_1, C.ext_crypto_ed25519_generate_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_ed25519_public_keys_version_1", ext_crypto_ed25519_public_keys_version_1, C.ext_crypto_ed25519_public_keys_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_ed25519_sign_version_1", ext_crypto_ed25519_sign_version_1, C.ext_crypto_ed25519_sign_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_ed25519_verify_version_1", ext_crypto_ed25519_verify_version_1, C.ext_crypto_ed25519_verify_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_finish_batch_verify_version_1", ext_crypto_finish_batch_verify_version_1, C.ext_crypto_finish_batch_verify_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_secp256k1_ecdsa_recover_version_1", ext_crypto_secp256k1_ecdsa_recover_version_1, C.ext_crypto_secp256k1_ecdsa_recover_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_secp256k1_ecdsa_recover_version_2", ext_crypto_secp256k1_ecdsa_recover_version_2, C.ext_crypto_secp256k1_ecdsa_recover_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_secp256k1_ecdsa_recover_compressed_version_1", ext_crypto_secp256k1_ecdsa_recover_compressed_version_1, C.ext_crypto_secp256k1_ecdsa_recover_compressed_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_secp256k1_ecdsa_recover_compressed_version_2", ext_crypto_secp256k1_ecdsa_recover_compressed_version_2, C.ext_crypto_secp256k1_ecdsa_recover_compressed_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_sr25519_generate_version_1", ext_crypto_sr25519_generate_version_1, C.ext_crypto_sr25519_generate_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_sr25519_public_keys_version_1", ext_crypto_sr25519_public_keys_version_1, C.ext_crypto_sr25519_public_keys_version_1) 
- if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_sr25519_sign_version_1", ext_crypto_sr25519_sign_version_1, C.ext_crypto_sr25519_sign_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_sr25519_verify_version_1", ext_crypto_sr25519_verify_version_1, C.ext_crypto_sr25519_verify_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_sr25519_verify_version_2", ext_crypto_sr25519_verify_version_2, C.ext_crypto_sr25519_verify_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_ecdsa_verify_version_2", ext_crypto_ecdsa_verify_version_2, C.ext_crypto_ecdsa_verify_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_crypto_start_batch_verify_version_1", ext_crypto_start_batch_verify_version_1, C.ext_crypto_start_batch_verify_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_clear_version_1", ext_default_child_storage_clear_version_1, C.ext_default_child_storage_clear_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_clear_prefix_version_1", ext_default_child_storage_clear_prefix_version_1, C.ext_default_child_storage_clear_prefix_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_exists_version_1", ext_default_child_storage_exists_version_1, C.ext_default_child_storage_exists_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_get_version_1", ext_default_child_storage_get_version_1, C.ext_default_child_storage_get_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_next_key_version_1", ext_default_child_storage_next_key_version_1, C.ext_default_child_storage_next_key_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_read_version_1", ext_default_child_storage_read_version_1, C.ext_default_child_storage_read_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_root_version_1", ext_default_child_storage_root_version_1, C.ext_default_child_storage_root_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_set_version_1", ext_default_child_storage_set_version_1, C.ext_default_child_storage_set_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_storage_kill_version_1", ext_default_child_storage_storage_kill_version_1, C.ext_default_child_storage_storage_kill_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_storage_kill_version_2", ext_default_child_storage_storage_kill_version_2, C.ext_default_child_storage_storage_kill_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_default_child_storage_storage_kill_version_3", ext_default_child_storage_storage_kill_version_3, C.ext_default_child_storage_storage_kill_version_3) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_hashing_blake2_128_version_1", ext_hashing_blake2_128_version_1, C.ext_hashing_blake2_128_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_hashing_blake2_256_version_1", ext_hashing_blake2_256_version_1, C.ext_hashing_blake2_256_version_1) - if err != nil { - return nil, err 
- } - _, err = imports.Append("ext_hashing_keccak_256_version_1", ext_hashing_keccak_256_version_1, C.ext_hashing_keccak_256_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_hashing_sha2_256_version_1", ext_hashing_sha2_256_version_1, C.ext_hashing_sha2_256_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_hashing_twox_256_version_1", ext_hashing_twox_256_version_1, C.ext_hashing_twox_256_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_hashing_twox_128_version_1", ext_hashing_twox_128_version_1, C.ext_hashing_twox_128_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_hashing_twox_64_version_1", ext_hashing_twox_64_version_1, C.ext_hashing_twox_64_version_1) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_logging_log_version_1", ext_logging_log_version_1, C.ext_logging_log_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_logging_max_level_version_1", ext_logging_max_level_version_1, C.ext_logging_max_level_version_1) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_misc_print_hex_version_1", ext_misc_print_hex_version_1, C.ext_misc_print_hex_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_misc_print_num_version_1", ext_misc_print_num_version_1, C.ext_misc_print_num_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_misc_print_utf8_version_1", ext_misc_print_utf8_version_1, C.ext_misc_print_utf8_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_misc_runtime_version_version_1", ext_misc_runtime_version_version_1, C.ext_misc_runtime_version_version_1) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_offchain_index_set_version_1", ext_offchain_index_set_version_1, C.ext_offchain_index_set_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_is_validator_version_1", ext_offchain_is_validator_version_1, C.ext_offchain_is_validator_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_local_storage_clear_version_1", ext_offchain_local_storage_clear_version_1, C.ext_offchain_local_storage_clear_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_local_storage_compare_and_set_version_1", ext_offchain_local_storage_compare_and_set_version_1, C.ext_offchain_local_storage_compare_and_set_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_local_storage_get_version_1", ext_offchain_local_storage_get_version_1, C.ext_offchain_local_storage_get_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_local_storage_set_version_1", ext_offchain_local_storage_set_version_1, C.ext_offchain_local_storage_set_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_network_state_version_1", ext_offchain_network_state_version_1, C.ext_offchain_network_state_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_random_seed_version_1", ext_offchain_random_seed_version_1, C.ext_offchain_random_seed_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_submit_transaction_version_1", ext_offchain_submit_transaction_version_1, C.ext_offchain_submit_transaction_version_1) - if err != nil { - return 
nil, err - } - _, err = imports.Append("ext_offchain_timestamp_version_1", ext_offchain_timestamp_version_1, C.ext_offchain_timestamp_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_sleep_until_version_1", ext_offchain_sleep_until_version_1, C.ext_offchain_sleep_until_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_http_request_start_version_1", ext_offchain_http_request_start_version_1, C.ext_offchain_http_request_start_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_offchain_http_request_add_header_version_1", ext_offchain_http_request_add_header_version_1, C.ext_offchain_http_request_add_header_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_sandbox_instance_teardown_version_1", ext_sandbox_instance_teardown_version_1, C.ext_sandbox_instance_teardown_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_sandbox_instantiate_version_1", ext_sandbox_instantiate_version_1, C.ext_sandbox_instantiate_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_sandbox_invoke_version_1", ext_sandbox_invoke_version_1, C.ext_sandbox_invoke_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_sandbox_memory_get_version_1", ext_sandbox_memory_get_version_1, C.ext_sandbox_memory_get_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_sandbox_memory_new_version_1", ext_sandbox_memory_new_version_1, C.ext_sandbox_memory_new_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_sandbox_memory_set_version_1", ext_sandbox_memory_set_version_1, C.ext_sandbox_memory_set_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_sandbox_memory_teardown_version_1", ext_sandbox_memory_teardown_version_1, C.ext_sandbox_memory_teardown_version_1) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_storage_append_version_1", ext_storage_append_version_1, C.ext_storage_append_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_changes_root_version_1", ext_storage_changes_root_version_1, C.ext_storage_changes_root_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_clear_version_1", ext_storage_clear_version_1, C.ext_storage_clear_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_clear_prefix_version_1", ext_storage_clear_prefix_version_1, C.ext_storage_clear_prefix_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_clear_prefix_version_2", ext_storage_clear_prefix_version_2, C.ext_storage_clear_prefix_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_commit_transaction_version_1", ext_storage_commit_transaction_version_1, C.ext_storage_commit_transaction_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_exists_version_1", ext_storage_exists_version_1, C.ext_storage_exists_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_get_version_1", ext_storage_get_version_1, C.ext_storage_get_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_next_key_version_1", ext_storage_next_key_version_1, C.ext_storage_next_key_version_1) - if err != nil { - return nil, err - } - _, err = 
imports.Append("ext_storage_read_version_1", ext_storage_read_version_1, C.ext_storage_read_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_rollback_transaction_version_1", ext_storage_rollback_transaction_version_1, C.ext_storage_rollback_transaction_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_root_version_1", ext_storage_root_version_1, C.ext_storage_root_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_root_version_2", ext_storage_root_version_2, C.ext_storage_root_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_set_version_1", ext_storage_set_version_1, C.ext_storage_set_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_storage_start_transaction_version_1", ext_storage_start_transaction_version_1, C.ext_storage_start_transaction_version_1) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_trie_blake2_256_ordered_root_version_1", ext_trie_blake2_256_ordered_root_version_1, C.ext_trie_blake2_256_ordered_root_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_trie_blake2_256_ordered_root_version_2", ext_trie_blake2_256_ordered_root_version_2, C.ext_trie_blake2_256_ordered_root_version_2) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_trie_blake2_256_root_version_1", ext_trie_blake2_256_root_version_1, C.ext_trie_blake2_256_root_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_trie_blake2_256_verify_proof_version_1", ext_trie_blake2_256_verify_proof_version_1, C.ext_trie_blake2_256_verify_proof_version_1) - if err != nil { - return nil, err - } - - _, err = imports.Append("ext_transaction_index_index_version_1", ext_transaction_index_index_version_1, C.ext_transaction_index_index_version_1) - if err != nil { - return nil, err - } - _, err = imports.Append("ext_transaction_index_renew_version_1", ext_transaction_index_renew_version_1, C.ext_transaction_index_renew_version_1) - if err != nil { - return nil, err +// ImportsNodeRuntime returns the WASM imports for the node runtime. 
+func ImportsNodeRuntime() (imports *wasm.Imports, err error) { + imports = wasm.NewImports() + + for _, toRegister := range []struct { + importName string + implementation interface{} + cgoPointer unsafe.Pointer + }{ + {"ext_allocator_free_version_1", ext_allocator_free_version_1, C.ext_allocator_free_version_1}, + {"ext_allocator_malloc_version_1", ext_allocator_malloc_version_1, C.ext_allocator_malloc_version_1}, + {"ext_crypto_ecdsa_verify_version_2", ext_crypto_ecdsa_verify_version_2, C.ext_crypto_ecdsa_verify_version_2}, + {"ext_crypto_ed25519_generate_version_1", ext_crypto_ed25519_generate_version_1, C.ext_crypto_ed25519_generate_version_1}, + {"ext_crypto_ed25519_public_keys_version_1", ext_crypto_ed25519_public_keys_version_1, C.ext_crypto_ed25519_public_keys_version_1}, + {"ext_crypto_ed25519_sign_version_1", ext_crypto_ed25519_sign_version_1, C.ext_crypto_ed25519_sign_version_1}, + {"ext_crypto_ed25519_verify_version_1", ext_crypto_ed25519_verify_version_1, C.ext_crypto_ed25519_verify_version_1}, + {"ext_crypto_finish_batch_verify_version_1", ext_crypto_finish_batch_verify_version_1, C.ext_crypto_finish_batch_verify_version_1}, + {"ext_crypto_secp256k1_ecdsa_recover_compressed_version_1", ext_crypto_secp256k1_ecdsa_recover_compressed_version_1, C.ext_crypto_secp256k1_ecdsa_recover_compressed_version_1}, + {"ext_crypto_secp256k1_ecdsa_recover_compressed_version_2", ext_crypto_secp256k1_ecdsa_recover_compressed_version_2, C.ext_crypto_secp256k1_ecdsa_recover_compressed_version_2}, + {"ext_crypto_secp256k1_ecdsa_recover_version_1", ext_crypto_secp256k1_ecdsa_recover_version_1, C.ext_crypto_secp256k1_ecdsa_recover_version_1}, + {"ext_crypto_secp256k1_ecdsa_recover_version_2", ext_crypto_secp256k1_ecdsa_recover_version_2, C.ext_crypto_secp256k1_ecdsa_recover_version_2}, + {"ext_crypto_sr25519_generate_version_1", ext_crypto_sr25519_generate_version_1, C.ext_crypto_sr25519_generate_version_1}, + {"ext_crypto_sr25519_public_keys_version_1", ext_crypto_sr25519_public_keys_version_1, C.ext_crypto_sr25519_public_keys_version_1}, + {"ext_crypto_sr25519_sign_version_1", ext_crypto_sr25519_sign_version_1, C.ext_crypto_sr25519_sign_version_1}, + {"ext_crypto_sr25519_verify_version_1", ext_crypto_sr25519_verify_version_1, C.ext_crypto_sr25519_verify_version_1}, + {"ext_crypto_sr25519_verify_version_2", ext_crypto_sr25519_verify_version_2, C.ext_crypto_sr25519_verify_version_2}, + {"ext_crypto_start_batch_verify_version_1", ext_crypto_start_batch_verify_version_1, C.ext_crypto_start_batch_verify_version_1}, + {"ext_default_child_storage_clear_prefix_version_1", ext_default_child_storage_clear_prefix_version_1, C.ext_default_child_storage_clear_prefix_version_1}, + {"ext_default_child_storage_clear_version_1", ext_default_child_storage_clear_version_1, C.ext_default_child_storage_clear_version_1}, + {"ext_default_child_storage_exists_version_1", ext_default_child_storage_exists_version_1, C.ext_default_child_storage_exists_version_1}, + {"ext_default_child_storage_get_version_1", ext_default_child_storage_get_version_1, C.ext_default_child_storage_get_version_1}, + {"ext_default_child_storage_next_key_version_1", ext_default_child_storage_next_key_version_1, C.ext_default_child_storage_next_key_version_1}, + {"ext_default_child_storage_read_version_1", ext_default_child_storage_read_version_1, C.ext_default_child_storage_read_version_1}, + {"ext_default_child_storage_root_version_1", ext_default_child_storage_root_version_1, C.ext_default_child_storage_root_version_1}, + 
{"ext_default_child_storage_set_version_1", ext_default_child_storage_set_version_1, C.ext_default_child_storage_set_version_1}, + {"ext_default_child_storage_storage_kill_version_1", ext_default_child_storage_storage_kill_version_1, C.ext_default_child_storage_storage_kill_version_1}, + {"ext_default_child_storage_storage_kill_version_2", ext_default_child_storage_storage_kill_version_2, C.ext_default_child_storage_storage_kill_version_2}, + {"ext_default_child_storage_storage_kill_version_3", ext_default_child_storage_storage_kill_version_3, C.ext_default_child_storage_storage_kill_version_3}, + {"ext_hashing_blake2_128_version_1", ext_hashing_blake2_128_version_1, C.ext_hashing_blake2_128_version_1}, + {"ext_hashing_blake2_256_version_1", ext_hashing_blake2_256_version_1, C.ext_hashing_blake2_256_version_1}, + {"ext_hashing_keccak_256_version_1", ext_hashing_keccak_256_version_1, C.ext_hashing_keccak_256_version_1}, + {"ext_hashing_sha2_256_version_1", ext_hashing_sha2_256_version_1, C.ext_hashing_sha2_256_version_1}, + {"ext_hashing_twox_128_version_1", ext_hashing_twox_128_version_1, C.ext_hashing_twox_128_version_1}, + {"ext_hashing_twox_256_version_1", ext_hashing_twox_256_version_1, C.ext_hashing_twox_256_version_1}, + {"ext_hashing_twox_64_version_1", ext_hashing_twox_64_version_1, C.ext_hashing_twox_64_version_1}, + {"ext_logging_log_version_1", ext_logging_log_version_1, C.ext_logging_log_version_1}, + {"ext_logging_max_level_version_1", ext_logging_max_level_version_1, C.ext_logging_max_level_version_1}, + {"ext_misc_print_hex_version_1", ext_misc_print_hex_version_1, C.ext_misc_print_hex_version_1}, + {"ext_misc_print_num_version_1", ext_misc_print_num_version_1, C.ext_misc_print_num_version_1}, + {"ext_misc_print_utf8_version_1", ext_misc_print_utf8_version_1, C.ext_misc_print_utf8_version_1}, + {"ext_misc_runtime_version_version_1", ext_misc_runtime_version_version_1, C.ext_misc_runtime_version_version_1}, + {"ext_offchain_http_request_add_header_version_1", ext_offchain_http_request_add_header_version_1, C.ext_offchain_http_request_add_header_version_1}, + {"ext_offchain_http_request_start_version_1", ext_offchain_http_request_start_version_1, C.ext_offchain_http_request_start_version_1}, + {"ext_offchain_index_set_version_1", ext_offchain_index_set_version_1, C.ext_offchain_index_set_version_1}, + {"ext_offchain_is_validator_version_1", ext_offchain_is_validator_version_1, C.ext_offchain_is_validator_version_1}, + {"ext_offchain_local_storage_clear_version_1", ext_offchain_local_storage_clear_version_1, C.ext_offchain_local_storage_clear_version_1}, + {"ext_offchain_local_storage_compare_and_set_version_1", ext_offchain_local_storage_compare_and_set_version_1, C.ext_offchain_local_storage_compare_and_set_version_1}, + {"ext_offchain_local_storage_get_version_1", ext_offchain_local_storage_get_version_1, C.ext_offchain_local_storage_get_version_1}, + {"ext_offchain_local_storage_set_version_1", ext_offchain_local_storage_set_version_1, C.ext_offchain_local_storage_set_version_1}, + {"ext_offchain_network_state_version_1", ext_offchain_network_state_version_1, C.ext_offchain_network_state_version_1}, + {"ext_offchain_random_seed_version_1", ext_offchain_random_seed_version_1, C.ext_offchain_random_seed_version_1}, + {"ext_offchain_sleep_until_version_1", ext_offchain_sleep_until_version_1, C.ext_offchain_sleep_until_version_1}, + {"ext_offchain_submit_transaction_version_1", ext_offchain_submit_transaction_version_1, C.ext_offchain_submit_transaction_version_1}, + 
{"ext_offchain_timestamp_version_1", ext_offchain_timestamp_version_1, C.ext_offchain_timestamp_version_1}, + {"ext_sandbox_instance_teardown_version_1", ext_sandbox_instance_teardown_version_1, C.ext_sandbox_instance_teardown_version_1}, + {"ext_sandbox_instantiate_version_1", ext_sandbox_instantiate_version_1, C.ext_sandbox_instantiate_version_1}, + {"ext_sandbox_invoke_version_1", ext_sandbox_invoke_version_1, C.ext_sandbox_invoke_version_1}, + {"ext_sandbox_memory_get_version_1", ext_sandbox_memory_get_version_1, C.ext_sandbox_memory_get_version_1}, + {"ext_sandbox_memory_new_version_1", ext_sandbox_memory_new_version_1, C.ext_sandbox_memory_new_version_1}, + {"ext_sandbox_memory_set_version_1", ext_sandbox_memory_set_version_1, C.ext_sandbox_memory_set_version_1}, + {"ext_sandbox_memory_teardown_version_1", ext_sandbox_memory_teardown_version_1, C.ext_sandbox_memory_teardown_version_1}, + {"ext_storage_append_version_1", ext_storage_append_version_1, C.ext_storage_append_version_1}, + {"ext_storage_changes_root_version_1", ext_storage_changes_root_version_1, C.ext_storage_changes_root_version_1}, + {"ext_storage_clear_prefix_version_1", ext_storage_clear_prefix_version_1, C.ext_storage_clear_prefix_version_1}, + {"ext_storage_clear_prefix_version_2", ext_storage_clear_prefix_version_2, C.ext_storage_clear_prefix_version_2}, + {"ext_storage_clear_version_1", ext_storage_clear_version_1, C.ext_storage_clear_version_1}, + {"ext_storage_commit_transaction_version_1", ext_storage_commit_transaction_version_1, C.ext_storage_commit_transaction_version_1}, + {"ext_storage_exists_version_1", ext_storage_exists_version_1, C.ext_storage_exists_version_1}, + {"ext_storage_get_version_1", ext_storage_get_version_1, C.ext_storage_get_version_1}, + {"ext_storage_next_key_version_1", ext_storage_next_key_version_1, C.ext_storage_next_key_version_1}, + {"ext_storage_read_version_1", ext_storage_read_version_1, C.ext_storage_read_version_1}, + {"ext_storage_rollback_transaction_version_1", ext_storage_rollback_transaction_version_1, C.ext_storage_rollback_transaction_version_1}, + {"ext_storage_root_version_1", ext_storage_root_version_1, C.ext_storage_root_version_1}, + {"ext_storage_root_version_2", ext_storage_root_version_2, C.ext_storage_root_version_2}, + {"ext_storage_set_version_1", ext_storage_set_version_1, C.ext_storage_set_version_1}, + {"ext_storage_start_transaction_version_1", ext_storage_start_transaction_version_1, C.ext_storage_start_transaction_version_1}, + {"ext_transaction_index_index_version_1", ext_transaction_index_index_version_1, C.ext_transaction_index_index_version_1}, + {"ext_transaction_index_renew_version_1", ext_transaction_index_renew_version_1, C.ext_transaction_index_renew_version_1}, + {"ext_trie_blake2_256_ordered_root_version_1", ext_trie_blake2_256_ordered_root_version_1, C.ext_trie_blake2_256_ordered_root_version_1}, + {"ext_trie_blake2_256_ordered_root_version_2", ext_trie_blake2_256_ordered_root_version_2, C.ext_trie_blake2_256_ordered_root_version_2}, + {"ext_trie_blake2_256_root_version_1", ext_trie_blake2_256_root_version_1, C.ext_trie_blake2_256_root_version_1}, + {"ext_trie_blake2_256_verify_proof_version_1", ext_trie_blake2_256_verify_proof_version_1, C.ext_trie_blake2_256_verify_proof_version_1}, + } { + _, err = imports.AppendFunction(toRegister.importName, toRegister.implementation, toRegister.cgoPointer) + if err != nil { + return nil, fmt.Errorf("importing function: %w", err) + } } return imports, nil From 8c16d4ece1e4b423b10f06da10beb74da212eb1d 
Mon Sep 17 00:00:00 2001
From: Kishan Sagathiya 
Date: Mon, 11 Jul 2022 20:44:45 +0530
Subject: [PATCH 44/48] If the direction is descending, prune from start.
 (#2662)

A block request contains a start block, an end block, a direction and a
max size. It could be the case that there are more blocks between start
and end than the max size allows. In such cases, so far we were always
pruning blocks from the end while preparing a block response. The correct
way, however, is to prune from the start if the direction is descending,
and from the end if the direction is ascending. For example, for a
subchain of 10 blocks and a max size of 5, an ascending request must keep
the first 5 blocks, while a descending request must keep the last 5.
Since we were pruning blocks the wrong way, we were preparing responses
that Substrate was not expecting, and that used to result in Substrate
dropping us. This commit fixes that issue.

Fixes #2595
---
 dot/sync/message.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/dot/sync/message.go b/dot/sync/message.go
index cef2f42e25..7baececb5e 100644
--- a/dot/sync/message.go
+++ b/dot/sync/message.go
@@ -338,8 +338,14 @@ func (s *Service) handleChainByHash(ancestor, descendant common.Hash,
 		return nil, err
 	}
 
+	// If the direction is descending, prune from the start;
+	// if the direction is ascending, prune from the end.
 	if uint(len(subchain)) > max {
-		subchain = subchain[:max]
+		if direction == network.Ascending {
+			subchain = subchain[:max]
+		} else {
+			subchain = subchain[uint(len(subchain))-max:]
+		}
 	}
 
 	data := make([]*types.BlockData, len(subchain))

From fe1e2f0b33500a19bc46902af7bfe27380f98ef3 Mon Sep 17 00:00:00 2001
From: Kishan Sagathiya 
Date: Tue, 12 Jul 2022 13:18:22 +0530
Subject: [PATCH 45/48] throw ErrProducerEquivocated only when both
 equivocatory blocks are created by primary block producer (#2596)

---
 dot/types/babe.go       |   7 +-
 lib/babe/errors.go      |   1 +
 lib/babe/verify.go      |  37 +++++++--
 lib/babe/verify_test.go | 165 ++++++++++++++++++++++++++++++----------
 4 files changed, 160 insertions(+), 50 deletions(-)

diff --git a/dot/types/babe.go b/dot/types/babe.go
index e0ebd30cf1..ab30b0f119 100644
--- a/dot/types/babe.go
+++ b/dot/types/babe.go
@@ -4,9 +4,12 @@
 package types
 
 import (
+	"errors"
 	"fmt"
 )
 
+var ErrNoFirstPreDigest = errors.New("first digest item is not pre-digest")
+
 // RandomnessLength is the length of the epoch randomness (32 bytes)
 const RandomnessLength = 32
 
@@ -107,7 +110,7 @@ func GetSlotFromHeader(header *Header) (uint64, error) {
 
 	preDigest, ok := header.Digest.Types[0].Value().(PreRuntimeDigest)
 	if !ok {
-		return 0, fmt.Errorf("first digest item is not pre-digest")
+		return 0, fmt.Errorf("%w: got %T", ErrNoFirstPreDigest, header.Digest.Types[0].Value())
 	}
 
 	digest, err := DecodeBabePreDigest(preDigest.Data)
@@ -140,7 +143,7 @@ func IsPrimary(header *Header) (bool, error) {
 
 	preDigest, ok := header.Digest.Types[0].Value().(PreRuntimeDigest)
 	if !ok {
-		return false, fmt.Errorf("first digest item is not pre-digest: type=%T", header.Digest.Types[0].Value())
+		return false, fmt.Errorf("%w: got %T", ErrNoFirstPreDigest, header.Digest.Types[0].Value())
 	}
 
 	digest, err := DecodeBabePreDigest(preDigest.Data)
diff --git a/lib/babe/errors.go b/lib/babe/errors.go
index b851f2c05a..3f50917477 100644
--- a/lib/babe/errors.go
+++ b/lib/babe/errors.go
@@ -84,6 +84,7 @@ var (
 	errServicePaused              = errors.New("service paused")
 	errInvalidSlotTechnique       = errors.New("invalid slot claiming technique")
 	errNoBABEAuthorityKeyProvided = errors.New("cannot create BABE service as authority; no keypair provided")
+	errLastDigestItemNotSeal      = errors.New("last digest item is not seal")
 
 	other         Other
 	invalidCustom InvalidCustom
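Note on the verify.go diff that follows: the verifier now treats two distinct blocks at the same height as equivocatory only when they were produced by the same authority under the same kind of slot claim (both primary, or both secondary). A minimal self-contained Go sketch of that comparison, assuming a simplified claim type (the real code switches over types.BabePrimaryPreDigest and the two secondary pre-digest variants):

package main

import "fmt"

// claim captures the two properties of a BABE slot claim that the
// equivocation check compares.
type claim struct {
	authorityIndex uint32
	isPrimary      bool
}

// equivocates reports whether two distinct blocks at the same height are
// equivocatory under the new rule: same authority, different block hash,
// and the same claim kind (both primary or both secondary).
func equivocates(existing, current claim, sameHash bool) bool {
	return existing.authorityIndex == current.authorityIndex &&
		!sameHash &&
		existing.isPrimary == current.isPrimary
}

func main() {
	primary := claim{authorityIndex: 1, isPrimary: true}
	secondary := claim{authorityIndex: 1, isPrimary: false}

	fmt.Println(equivocates(primary, primary, false))   // true: both primary
	fmt.Println(equivocates(primary, secondary, false)) // false: claim kinds differ
	fmt.Println(equivocates(primary, primary, true))    // false: same block
}

Under this rule, a primary claim conflicting with a secondary claim from the same authority is no longer reported as ErrProducerEquivocated.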
diff --git a/lib/babe/verify.go b/lib/babe/verify.go
index 50b59c2211..8957dd9995 100644
--- a/lib/babe/verify.go
+++ b/lib/babe/verify.go
@@ -262,12 +262,12 @@ func (b *verifier) verifyAuthorshipRight(header *types.Header) error {
 
 	preDigest, ok := preDigestItem.Value().(types.PreRuntimeDigest)
 	if !ok {
-		return fmt.Errorf("first digest item is not pre-digest")
+		return fmt.Errorf("%w: got %T", types.ErrNoFirstPreDigest, preDigestItem.Value())
 	}
 
 	seal, ok := sealItem.Value().(types.SealDigest)
 	if !ok {
-		return fmt.Errorf("last digest item is not seal")
+		return fmt.Errorf("%w: got %T", errLastDigestItemNotSeal, sealItem.Value())
 	}
 
 	babePreDigest, err := b.verifyPreRuntimeDigest(&preDigest)
@@ -329,29 +329,50 @@ func (b *verifier) verifyAuthorshipRight(header *types.Header) error {
 	// hashes is hashes of all blocks with same block number as header.Number
 	hashes := b.blockState.GetAllBlocksAtDepth(header.ParentHash)
 
-	for _, hash := range hashes {
-		currentHeader, err := b.blockState.GetHeader(hash)
+	for _, currentHash := range hashes {
+		currentHeader, err := b.blockState.GetHeader(currentHash)
 		if err != nil {
-			continue
+			return fmt.Errorf("failed to get header: %w", err)
 		}
 
 		currentBlockProducerIndex, err := getAuthorityIndex(currentHeader)
 		if err != nil {
-			continue
+			return fmt.Errorf("failed to get authority index: %w", err)
 		}
 
+		if len(currentHeader.Digest.Types) == 0 {
+			return fmt.Errorf("current header missing digest")
+		}
+
+		currentPreDigestItem := currentHeader.Digest.Types[0]
+		currentPreDigest, ok := currentPreDigestItem.Value().(types.PreRuntimeDigest)
+		if !ok {
+			return fmt.Errorf("%w: got %T", types.ErrNoFirstPreDigest, currentPreDigestItem.Value())
+		}
+
+		currentBabePreDigest, err := b.verifyPreRuntimeDigest(&currentPreDigest)
+		if err != nil {
+			return fmt.Errorf("failed to verify pre-runtime digest: %w", err)
+		}
+
+		_, isCurrentBlockProducerPrimary := currentBabePreDigest.(types.BabePrimaryPreDigest)
+
+		var isExistingBlockProducerPrimary bool
 		var existingBlockProducerIndex uint32
 		switch d := babePreDigest.(type) {
 		case types.BabePrimaryPreDigest:
 			existingBlockProducerIndex = d.AuthorityIndex
+			isExistingBlockProducerPrimary = true
 		case types.BabeSecondaryVRFPreDigest:
 			existingBlockProducerIndex = d.AuthorityIndex
 		case types.BabeSecondaryPlainPreDigest:
 			existingBlockProducerIndex = d.AuthorityIndex
 		}
 
-		// same authority won't produce two different blocks at the same block number
-		if currentBlockProducerIndex == existingBlockProducerIndex && hash != header.Hash() {
+		// the same authority must not produce two different blocks at the same block number with the same claim type
+		if currentBlockProducerIndex == existingBlockProducerIndex &&
+			!currentHash.Equal(header.Hash()) &&
+			isCurrentBlockProducerPrimary == isExistingBlockProducerPrimary {
 			return ErrProducerEquivocated
 		}
 	}
diff --git a/lib/babe/verify_test.go b/lib/babe/verify_test.go
index d8c8432ebf..9b2adf9e8b 100644
--- a/lib/babe/verify_test.go
+++ b/lib/babe/verify_test.go
@@ -470,9 +470,6 @@ func Test_verifier_verifyAuthorshipRight(t *testing.T) {
 	ctrl := gomock.NewController(t)
 	mockBlockState := NewMockBlockState(ctrl)
 	mockBlockStateErr := NewMockBlockState(ctrl)
-	mockBlockStateEquiv1 := NewMockBlockState(ctrl)
-	mockBlockStateEquiv2 := NewMockBlockState(ctrl)
-	mockBlockStateEquiv3 := NewMockBlockState(ctrl)
 
 	//Generate keys
 	kp, err := sr25519.GenerateKeypair()
@@ -547,14 +544,6 @@ func Test_verifier_verifyAuthorshipRight(t *testing.T) {
 	mockBlockStateErr.EXPECT().GetAllBlocksAtDepth(gomock.Any()).Return(h1)
 	mockBlockStateErr.EXPECT().GetHeader(h).Return(nil, errors.New("get header error"))
-	mockBlockStateEquiv1.EXPECT().GetAllBlocksAtDepth(gomock.Any()).Return(h1)
-	mockBlockStateEquiv1.EXPECT().GetHeader(h).Return(testHeaderPrimary, nil)
-
-	mockBlockStateEquiv2.EXPECT().GetAllBlocksAtDepth(gomock.Any()).Return(h1)
-	mockBlockStateEquiv2.EXPECT().GetHeader(h).Return(testSecPlainHeader, nil)
-	mockBlockStateEquiv3.EXPECT().GetAllBlocksAtDepth(gomock.Any()).Return(h1)
-	mockBlockStateEquiv3.EXPECT().GetHeader(h).Return(testSecVrfHeader, nil)
-
 	// Case 0: First element not preruntime digest
 	header0 := newTestHeader(t, testInvalidSeal, testInvalidSeal)
 
@@ -613,27 +602,6 @@ func Test_verifier_verifyAuthorshipRight(t *testing.T) {
 	//// Case 8: Get header error
 	babeVerifier6 := newTestVerifier(t, kp, mockBlockStateErr, scale.MaxUint128, false)
 
-	// Case 9: Equivocate case primary
-	babeVerifier7 := newTestVerifier(t, kp, mockBlockStateEquiv1, scale.MaxUint128, false)
-
-	// Case 10: Equivocate case secondary plain
-	babeSecPlainPrd2, err := testBabeSecondaryPlainPreDigest.ToPreRuntimeDigest()
-	assert.NoError(t, err)
-	header8 := newTestHeader(t, *babeSecPlainPrd2)
-
-	hash2 := encodeAndHashHeader(t, header8)
-	signAndAddSeal(t, kp, header8, hash2[:])
-	babeVerifier8 := newTestVerifier(t, kp, mockBlockStateEquiv2, scale.MaxUint128, true)
-
-	// Case 11: equivocation case secondary VRF
-	encVrfDigest := newEncodedBabeDigest(t, testBabeSecondaryVRFPreDigest)
-	assert.NoError(t, err)
-	header9 := newTestHeader(t, *types.NewBABEPreRuntimeDigest(encVrfDigest))
-
-	hash3 := encodeAndHashHeader(t, header9)
-	signAndAddSeal(t, kp, header9, hash3[:])
-	babeVerifier9 := newTestVerifier(t, kp, mockBlockStateEquiv3, scale.MaxUint128, true)
-
 	tests := []struct {
 		name     string
 		verifier verifier
 		header   *types.Header
 		expErr   error
 	}{
 		{
 			name:     "missing digest",
 			verifier: verifier{},
 			header:   types.NewEmptyHeader(),
-			expErr:   errors.New("block header is missing digest items"),
+			expErr:   errMissingDigestItems,
 		},
 		{
 			name:     "first digest invalid",
 			verifier: verifier{},
 			header:   header0,
-			expErr:   errors.New("first digest item is not pre-digest"),
+			expErr:   fmt.Errorf("%w: got types.SealDigest", types.ErrNoFirstPreDigest),
 		},
 		{
 			name:     "last digest invalid",
 			verifier: verifier{},
 			header:   header1,
-			expErr:   errors.New("last digest item is not seal"),
+			expErr:   fmt.Errorf("%w: got types.PreRuntimeDigest", errLastDigestItemNotSeal),
 		},
 		{
 			name:     "invalid preruntime digest data",
@@ -692,28 +660,145 @@ func Test_verifier_verifyAuthorshipRight(t *testing.T) {
 			name:     "valid digest items, getAuthorityIndex error",
 			verifier: *babeVerifier5,
 			header:   header7,
+			expErr:   errors.New("failed to get authority index: no digest provided"),
 		},
 		{
 			name:     "get header err",
 			verifier: *babeVerifier6,
 			header:   header7,
+			expErr:   errors.New("failed to get header: get header error"),
 		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			b := &tt.verifier
+			err := b.verifyAuthorshipRight(tt.header)
+			if tt.expErr != nil {
+				assert.EqualError(t, err, tt.expErr.Error())
+			} else {
+				assert.NoError(t, err)
+			}
+
+		})
+	}
+}
+
+func Test_verifier_verifyAuthorshipRightEquivocatory(t *testing.T) {
+	ctrl := gomock.NewController(t)
+
+	mockBlockStateEquiv1 := NewMockBlockState(ctrl)
+	mockBlockStateEquiv2 := NewMockBlockState(ctrl)
+	mockBlockStateEquiv3 := NewMockBlockState(ctrl)
+
+	// Generate keys
+	kp, err := sr25519.GenerateKeypair()
+	assert.NoError(t, err)
+
+	output, proof, err :=
kp.VrfSign(makeTranscript(Randomness{}, uint64(1), 1)) + assert.NoError(t, err) + + testBabeSecondaryPlainPreDigest := types.BabeSecondaryPlainPreDigest{ + AuthorityIndex: 1, + SlotNumber: 1, + } + testBabeSecondaryVRFPreDigest := types.BabeSecondaryVRFPreDigest{ + AuthorityIndex: 1, + SlotNumber: 1, + VrfOutput: output, + VrfProof: proof, + } + + //BabePrimaryPreDigest case + secDigest1 := types.BabePrimaryPreDigest{ + SlotNumber: 1, + VRFOutput: output, + VRFProof: proof, + } + prd1, err := secDigest1.ToPreRuntimeDigest() + assert.NoError(t, err) + + auth := types.NewAuthority(kp.Public(), uint64(1)) + vi := &verifierInfo{ + authorities: []types.Authority{*auth, *auth}, + threshold: scale.MaxUint128, + } + + verifierEquivocatoryPrimary, err := newVerifier(mockBlockStateEquiv1, 1, vi) + assert.NoError(t, err) + + headerEquivocatoryPrimary := newTestHeader(t, *prd1) + hashEquivocatoryPrimary := encodeAndHashHeader(t, headerEquivocatoryPrimary) + signAndAddSeal(t, kp, headerEquivocatoryPrimary, hashEquivocatoryPrimary[:]) + + mockBlockStateEquiv1.EXPECT().GetAllBlocksAtDepth(headerEquivocatoryPrimary.ParentHash).Return( + []common.Hash{hashEquivocatoryPrimary}) + mockBlockStateEquiv1.EXPECT().GetHeader(hashEquivocatoryPrimary).Return(headerEquivocatoryPrimary, nil) + + // Secondary Plain Test Header + testParentPrd, err := testBabeSecondaryPlainPreDigest.ToPreRuntimeDigest() + assert.NoError(t, err) + testParentHeader := newTestHeader(t, *testParentPrd) + + testParentHash := encodeAndHashHeader(t, testParentHeader) + testSecondaryPrd, err := testBabeSecondaryPlainPreDigest.ToPreRuntimeDigest() + assert.NoError(t, err) + testSecPlainHeader := newTestHeader(t, *testSecondaryPrd) + testSecPlainHeader.ParentHash = testParentHash + + babeSecPlainPrd2, err := testBabeSecondaryPlainPreDigest.ToPreRuntimeDigest() + assert.NoError(t, err) + headerEquivocatorySecondaryPlain := newTestHeader(t, *babeSecPlainPrd2) + + hashEquivocatorySecondaryPlain := encodeAndHashHeader(t, headerEquivocatorySecondaryPlain) + signAndAddSeal(t, kp, headerEquivocatorySecondaryPlain, hashEquivocatorySecondaryPlain[:]) + babeVerifier8 := newTestVerifier(t, kp, mockBlockStateEquiv2, scale.MaxUint128, true) + + mockBlockStateEquiv2.EXPECT().GetAllBlocksAtDepth(headerEquivocatorySecondaryPlain.ParentHash).Return( + []common.Hash{hashEquivocatorySecondaryPlain}) + mockBlockStateEquiv2.EXPECT().GetHeader(hashEquivocatorySecondaryPlain).Return(headerEquivocatorySecondaryPlain, nil) + + // Secondary Vrf Test Header + encParentVrfDigest := newEncodedBabeDigest(t, testBabeSecondaryVRFPreDigest) + testParentVrfHeader := newTestHeader(t, *types.NewBABEPreRuntimeDigest(encParentVrfDigest)) + + testVrfParentHash := encodeAndHashHeader(t, testParentVrfHeader) + encVrfHeader := newEncodedBabeDigest(t, testBabeSecondaryVRFPreDigest) + testSecVrfHeader := newTestHeader(t, *types.NewBABEPreRuntimeDigest(encVrfHeader)) + testSecVrfHeader.ParentHash = testVrfParentHash + + encVrfDigest := newEncodedBabeDigest(t, testBabeSecondaryVRFPreDigest) + assert.NoError(t, err) + headerEquivocatorySecondaryVRF := newTestHeader(t, *types.NewBABEPreRuntimeDigest(encVrfDigest)) + + hashEquivocatorySecondaryVRF := encodeAndHashHeader(t, headerEquivocatorySecondaryVRF) + signAndAddSeal(t, kp, headerEquivocatorySecondaryVRF, hashEquivocatorySecondaryVRF[:]) + babeVerifierEquivocatorySecondaryVRF := newTestVerifier(t, kp, mockBlockStateEquiv3, scale.MaxUint128, true) + 
mockBlockStateEquiv3.EXPECT().GetAllBlocksAtDepth(headerEquivocatorySecondaryVRF.ParentHash).Return( + []common.Hash{hashEquivocatorySecondaryVRF}) + mockBlockStateEquiv3.EXPECT().GetHeader(hashEquivocatorySecondaryVRF).Return(headerEquivocatorySecondaryVRF, nil) + + tests := []struct { + name string + verifier verifier + header *types.Header + expErr error + }{ { name: "equivocate - primary", - verifier: *babeVerifier7, - header: header7, + verifier: *verifierEquivocatoryPrimary, + header: headerEquivocatoryPrimary, expErr: ErrProducerEquivocated, }, { name: "equivocate - secondary plain", verifier: *babeVerifier8, - header: header8, + header: headerEquivocatorySecondaryPlain, expErr: ErrProducerEquivocated, }, { name: "equivocate - secondary vrf", - verifier: *babeVerifier9, - header: header9, + verifier: *babeVerifierEquivocatorySecondaryVRF, + header: headerEquivocatorySecondaryVRF, expErr: ErrProducerEquivocated, }, } From 3a471d91f61c1ba10022ff11403b9cebb71c8666 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Tue, 12 Jul 2022 09:59:51 -0400 Subject: [PATCH 46/48] chore(ci): limit `TestSync_Basic` to 1 minute (#2663) --- tests/stress/stress_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/stress/stress_test.go b/tests/stress/stress_test.go index d4ad989d3a..8c6733f8e1 100644 --- a/tests/stress/stress_test.go +++ b/tests/stress/stress_test.go @@ -138,6 +138,8 @@ func TestSync_Basic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) nodes.InitAndStartTest(ctx, t, cancel) + ctx, cancel = context.WithTimeout(ctx, time.Minute) + defer cancel() const getChainHeadTimeout = time.Second From 0008b596156e359150436144bcfa3c69e35bba75 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Mon, 18 Jul 2022 10:37:44 -0400 Subject: [PATCH 47/48] fix(internal/log): log level `DoNotChange` (#2672) - No longer logs at trace level when replacing runtime --- internal/log/options.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/log/options.go b/internal/log/options.go index 2bfde14fb6..459fd46a21 100644 --- a/internal/log/options.go +++ b/internal/log/options.go @@ -15,6 +15,9 @@ type Option func(s *settings) // The level defaults to the lowest level, trce. func SetLevel(level Level) Option { return func(s *settings) { + if level == DoNotChange { + return + } s.level = &level } } From 055e5c3fde6d732ab9a3ae9bab3cb85938005e88 Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Mon, 18 Jul 2022 10:38:13 -0400 Subject: [PATCH 48/48] fix(tests): update block body regex in `TestChainRPC` (#2674) --- tests/rpc/rpc_03-chain_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/rpc/rpc_03-chain_test.go b/tests/rpc/rpc_03-chain_test.go index 107d17a911..abe59ec00f 100644 --- a/tests/rpc/rpc_03-chain_test.go +++ b/tests/rpc/rpc_03-chain_test.go @@ -102,7 +102,7 @@ func TestChainRPC(t *testing.T) { } block.Block.Header.Digest.Logs = nil assert.Len(t, block.Block.Body, 1) - const bodyRegex = `^0x280403000b[0-9a-z]{8}8101$` + const bodyRegex = `^0x280403000b[0-9a-z]{8}8201$` assert.Regexp(t, bodyRegex, block.Block.Body[0]) block.Block.Body = nil
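For context on the internal/log fix in PATCH 47 above: SetLevel is a functional option, and DoNotChange acts as a sentinel level meaning "keep whatever level is already set". A minimal self-contained Go sketch of the pattern, assuming simplified settings and an illustrative sentinel value (the real internal/log package defines its own sentinel and carries more fields):

package main

import "fmt"

type Level int8

const (
	Trace Level = iota
	Debug
	Info
)

// DoNotChange is a sentinel level telling options to keep the current
// level. (Illustrative value only; the real package defines its own.)
const DoNotChange Level = -1

type settings struct{ level *Level }

// Option mutates the logger settings.
type Option func(s *settings)

// SetLevel returns an option that sets the logger level, unless given
// the DoNotChange sentinel, in which case it leaves settings untouched.
func SetLevel(level Level) Option {
	return func(s *settings) {
		if level == DoNotChange {
			return
		}
		s.level = &level
	}
}

func main() {
	current := Info
	s := settings{level: &current}

	SetLevel(DoNotChange)(&s) // no-op: level stays Info
	fmt.Println(*s.level)     // 2

	SetLevel(Debug)(&s) // replaces the level
	fmt.Println(*s.level) // 1
}

Checking the sentinel inside the option keeps every call site a plain SetLevel(level) call, rather than forcing callers to special-case DoNotChange themselves, which is what previously caused the runtime-replacement path to fall back to trace level.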