From 84b59884dbb2531830cbafa0cb3caf282a672a01 Mon Sep 17 00:00:00 2001 From: Ryan Date: Wed, 13 Sep 2023 11:43:34 +0200 Subject: [PATCH] refactor(swamp): use RPC client instead of service pointers (#2699) Replaces #2356 First step of #2337 --- nodebuilder/node.go | 2 + nodebuilder/tests/api_test.go | 48 +++++++++++--------- nodebuilder/tests/blob_test.go | 19 ++++---- nodebuilder/tests/fraud_test.go | 12 +++-- nodebuilder/tests/nd_test.go | 22 ++++++--- nodebuilder/tests/p2p_test.go | 39 ++++++++++++---- nodebuilder/tests/reconstruct_test.go | 39 ++++++++++------ nodebuilder/tests/sync_test.go | 64 ++++++++++++++++----------- 8 files changed, 158 insertions(+), 87 deletions(-) diff --git a/nodebuilder/node.go b/nodebuilder/node.go index 58a182276c..3e6950a6ae 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "github.com/cristalhq/jwt" "github.com/ipfs/boxo/blockservice" "github.com/ipfs/boxo/exchange" logging "github.com/ipfs/go-log/v2" @@ -48,6 +49,7 @@ type Node struct { Network p2p.Network Bootstrappers p2p.Bootstrappers Config *Config + AdminSigner jwt.Signer // rpc components RPCServer *rpc.Server // not optional diff --git a/nodebuilder/tests/api_test.go b/nodebuilder/tests/api_test.go index 0e225668bb..2fd4b2d3da 100644 --- a/nodebuilder/tests/api_test.go +++ b/nodebuilder/tests/api_test.go @@ -13,11 +13,27 @@ import ( "github.com/celestiaorg/celestia-node/api/rpc/client" "github.com/celestiaorg/celestia-node/blob" "github.com/celestiaorg/celestia-node/blob/blobtest" + "github.com/celestiaorg/celestia-node/libs/authtoken" "github.com/celestiaorg/celestia-node/nodebuilder" "github.com/celestiaorg/celestia-node/nodebuilder/node" "github.com/celestiaorg/celestia-node/nodebuilder/tests/swamp" ) +func getAdminClient(ctx context.Context, nd *nodebuilder.Node, t *testing.T) *client.Client { + t.Helper() + + signer := nd.AdminSigner + listenAddr := "ws://" + nd.RPCServer.ListenAddr() + + jwt, err := authtoken.NewSignedJWT(signer, []auth.Permission{"public", "read", "write", "admin"}) + require.NoError(t, err) + + client, err := client.NewClient(ctx, listenAddr, jwt) + require.NoError(t, err) + + return client +} + func TestNodeModule(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), swamp.DefaultTestTimeout) t.Cleanup(cancel) @@ -66,26 +82,20 @@ func TestGetByHeight(t *testing.T) { err := bridge.Start(ctx) require.NoError(t, err) - adminPerms := []auth.Permission{"public", "read", "write", "admin"} - jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) - require.NoError(t, err) - - bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() - client, err := client.NewClient(ctx, bridgeAddr, jwt) - require.NoError(t, err) + rpcClient := getAdminClient(ctx, bridge, t) // let a few blocks be produced - _, err = client.Header.WaitForHeight(ctx, 3) + _, err = rpcClient.Header.WaitForHeight(ctx, 3) require.NoError(t, err) - networkHead, err := client.Header.NetworkHead(ctx) + networkHead, err := rpcClient.Header.NetworkHead(ctx) require.NoError(t, err) - _, err = client.Header.GetByHeight(ctx, networkHead.Height()+1) + _, err = rpcClient.Header.GetByHeight(ctx, networkHead.Height()+1) require.Nil(t, err, "Requesting syncer.Head()+1 shouldn't return an error") - networkHead, err = client.Header.NetworkHead(ctx) + networkHead, err = rpcClient.Header.NetworkHead(ctx) require.NoError(t, err) - _, err = client.Header.GetByHeight(ctx, networkHead.Height()+2) + _, err = rpcClient.Header.GetByHeight(ctx, networkHead.Height()+2) 
require.ErrorContains(t, err, "given height is from the future") } @@ -101,13 +111,7 @@ func TestBlobRPC(t *testing.T) { err := bridge.Start(ctx) require.NoError(t, err) - adminPerms := []auth.Permission{"public", "read", "write", "admin"} - jwt, err := bridge.AdminServ.AuthNew(ctx, adminPerms) - require.NoError(t, err) - - bridgeAddr := "http://" + bridge.RPCServer.ListenAddr() - client, err := client.NewClient(ctx, bridgeAddr, jwt) - require.NoError(t, err) + rpcClient := getAdminClient(ctx, bridge, t) appBlobs, err := blobtest.GenerateV0Blobs([]int{8}, false) require.NoError(t, err) @@ -119,7 +123,7 @@ func TestBlobRPC(t *testing.T) { ) require.NoError(t, err) - height, err := client.Blob.Submit(ctx, []*blob.Blob{newBlob}, nil) + height, err := rpcClient.Blob.Submit(ctx, []*blob.Blob{newBlob}, nil) require.NoError(t, err) require.True(t, height != 0) } @@ -147,9 +151,11 @@ func TestHeaderSubscription(t *testing.T) { err = light.Start(ctx) require.NoError(t, err) + lightClient := getAdminClient(ctx, light, t) + // subscribe to headers via the light node's RPC header subscription subctx, subcancel := context.WithCancel(ctx) - sub, err := light.HeaderServ.Subscribe(subctx) + sub, err := lightClient.Header.Subscribe(subctx) require.NoError(t, err) // listen for 5 headers for i := 0; i < 5; i++ { diff --git a/nodebuilder/tests/blob_test.go b/nodebuilder/tests/blob_test.go index 80fee071fb..2078fbfa74 100644 --- a/nodebuilder/tests/blob_test.go +++ b/nodebuilder/tests/blob_test.go @@ -55,12 +55,15 @@ func TestBlobModule(t *testing.T) { lightNode := sw.NewNodeWithConfig(node.Light, lightCfg) require.NoError(t, lightNode.Start(ctx)) - height, err := fullNode.BlobServ.Submit(ctx, blobs, nil) + fullClient := getAdminClient(ctx, fullNode, t) + lightClient := getAdminClient(ctx, lightNode, t) + + height, err := fullClient.Blob.Submit(ctx, blobs, nil) require.NoError(t, err) - _, err = fullNode.HeaderServ.WaitForHeight(ctx, height) + _, err = fullClient.Header.WaitForHeight(ctx, height) require.NoError(t, err) - _, err = lightNode.HeaderServ.WaitForHeight(ctx, height) + _, err = lightClient.Header.WaitForHeight(ctx, height) require.NoError(t, err) var test = []struct { @@ -70,7 +73,7 @@ func TestBlobModule(t *testing.T) { { name: "Get", doFn: func(t *testing.T) { - blob1, err := fullNode.BlobServ.Get(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) + blob1, err := fullClient.Blob.Get(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) require.NoError(t, err) require.Equal(t, blobs[0], blob1) }, @@ -78,7 +81,7 @@ func TestBlobModule(t *testing.T) { { name: "GetAll", doFn: func(t *testing.T) { - newBlobs, err := fullNode.BlobServ.GetAll(ctx, height, []share.Namespace{blobs[0].Namespace()}) + newBlobs, err := fullClient.Blob.GetAll(ctx, height, []share.Namespace{blobs[0].Namespace()}) require.NoError(t, err) require.Len(t, newBlobs, len(appBlobs0)) require.True(t, bytes.Equal(blobs[0].Commitment, newBlobs[0].Commitment)) @@ -88,10 +91,10 @@ func TestBlobModule(t *testing.T) { { name: "Included", doFn: func(t *testing.T) { - proof, err := fullNode.BlobServ.GetProof(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) + proof, err := fullClient.Blob.GetProof(ctx, height, blobs[0].Namespace(), blobs[0].Commitment) require.NoError(t, err) - included, err := lightNode.BlobServ.Included( + included, err := lightClient.Blob.Included( ctx, height, blobs[0].Namespace(), @@ -114,7 +117,7 @@ func TestBlobModule(t *testing.T) { ) require.NoError(t, err) - b, err := fullNode.BlobServ.Get(ctx, height, 
newBlob.Namespace(), newBlob.Commitment) + b, err := fullClient.Blob.Get(ctx, height, newBlob.Namespace(), newBlob.Commitment) assert.Nil(t, b) require.Error(t, err) require.ErrorIs(t, err, blob.ErrBlobNotFound) diff --git a/nodebuilder/tests/fraud_test.go b/nodebuilder/tests/fraud_test.go index 95c702c0c0..cd5be80517 100644 --- a/nodebuilder/tests/fraud_test.go +++ b/nodebuilder/tests/fraud_test.go @@ -89,9 +89,11 @@ func TestFraudProofHandling(t *testing.T) { err = full.Start(ctx) require.NoError(t, err) + fullClient := getAdminClient(ctx, full, t) + // 5. subCtx, subCancel := context.WithCancel(ctx) - subscr, err := full.FraudServ.Subscribe(subCtx, byzantine.BadEncoding) + subscr, err := fullClient.Fraud.Subscribe(subCtx, byzantine.BadEncoding) require.NoError(t, err) select { case p := <-subscr: @@ -108,7 +110,7 @@ func TestFraudProofHandling(t *testing.T) { // lifecycles of each Module. // 6. syncCtx, syncCancel := context.WithTimeout(context.Background(), blockTime*5) - _, err = full.HeaderServ.WaitForHeight(syncCtx, 15) + _, err = fullClient.Header.WaitForHeight(syncCtx, 15) require.ErrorIs(t, err, context.DeadlineExceeded) syncCancel() @@ -118,10 +120,11 @@ func TestFraudProofHandling(t *testing.T) { lnStore := nodebuilder.MockStore(t, cfg) light := sw.NewNodeWithStore(node.Light, lnStore) require.NoError(t, light.Start(ctx)) + lightClient := getAdminClient(ctx, light, t) // 8. subCtx, subCancel = context.WithCancel(ctx) - subscr, err = light.FraudServ.Subscribe(subCtx, byzantine.BadEncoding) + subscr, err = lightClient.Fraud.Subscribe(subCtx, byzantine.BadEncoding) require.NoError(t, err) select { case p := <-subscr: @@ -135,7 +138,8 @@ func TestFraudProofHandling(t *testing.T) { // 9. fN := sw.NewNodeWithStore(node.Full, store) require.Error(t, fN.Start(ctx)) - proofs, err := fN.FraudServ.Get(ctx, byzantine.BadEncoding) + fNClient := getAdminClient(ctx, fN, t) + proofs, err := fNClient.Fraud.Get(ctx, byzantine.BadEncoding) require.NoError(t, err) require.NotNil(t, proofs) diff --git a/nodebuilder/tests/nd_test.go b/nodebuilder/tests/nd_test.go index cb02e2a178..64d672cddc 100644 --- a/nodebuilder/tests/nd_test.go +++ b/nodebuilder/tests/nd_test.go @@ -45,6 +45,9 @@ func TestShrexNDFromLights(t *testing.T) { err = light.Start(ctx) require.NoError(t, err) + bridgeClient := getAdminClient(ctx, bridge, t) + lightClient := getAdminClient(ctx, light, t) + // wait for chain to be filled require.NoError(t, <-fillDn) @@ -54,7 +57,7 @@ func TestShrexNDFromLights(t *testing.T) { // the block that actually has transactions. We can get this data from the // response returned by FillBlock. 
for i := 16; i < blocks; i++ { - h, err := bridge.HeaderServ.GetByHeight(ctx, uint64(i)) + h, err := bridgeClient.Header.GetByHeight(ctx, uint64(i)) require.NoError(t, err) reqCtx, cancel := context.WithTimeout(ctx, time.Second*5) @@ -62,9 +65,9 @@ func TestShrexNDFromLights(t *testing.T) { // ensure to fetch random namespace (not the reserved namespace) namespace := h.DAH.RowRoots[1][:share.NamespaceSize] - expected, err := bridge.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + expected, err := bridgeClient.Share.GetSharesByNamespace(reqCtx, h.DAH, namespace) require.NoError(t, err) - got, err := light.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + got, err := lightClient.Share.GetSharesByNamespace(reqCtx, h.DAH, namespace) require.NoError(t, err) require.True(t, len(got[0].Shares) > 0) @@ -113,12 +116,15 @@ func TestShrexNDFromLightsWithBadFulls(t *testing.T) { require.NoError(t, startFullNodes(ctx, fulls...)) require.NoError(t, light.Start(ctx)) + bridgeClient := getAdminClient(ctx, bridge, t) + lightClient := getAdminClient(ctx, light, t) + // wait for chain to fill up require.NoError(t, <-fillDn) // first 2 blocks are not filled with data for i := 3; i < blocks; i++ { - h, err := bridge.HeaderServ.GetByHeight(ctx, uint64(i)) + h, err := bridgeClient.Header.GetByHeight(ctx, uint64(i)) require.NoError(t, err) if len(h.DAH.RowRoots) != bsize*2 { @@ -133,16 +139,18 @@ func TestShrexNDFromLightsWithBadFulls(t *testing.T) { // ensure to fetch random namespace (not the reserved namespace) namespace := h.DAH.RowRoots[1][:share.NamespaceSize] - expected, err := bridge.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + expected, err := bridgeClient.Share.GetSharesByNamespace(reqCtx, h.DAH, namespace) require.NoError(t, err) require.True(t, len(expected[0].Shares) > 0) // choose a random full to test - gotFull, err := fulls[len(fulls)/2].ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + fN := fulls[len(fulls)/2] + fnClient := getAdminClient(ctx, fN, t) + gotFull, err := fnClient.Share.GetSharesByNamespace(reqCtx, h.DAH, namespace) require.NoError(t, err) require.True(t, len(gotFull[0].Shares) > 0) - gotLight, err := light.ShareServ.GetSharesByNamespace(reqCtx, h.DAH, namespace) + gotLight, err := lightClient.Share.GetSharesByNamespace(reqCtx, h.DAH, namespace) require.NoError(t, err) require.True(t, len(gotLight[0].Shares) > 0) diff --git a/nodebuilder/tests/p2p_test.go b/nodebuilder/tests/p2p_test.go index 083712dfdd..d05846f40c 100644 --- a/nodebuilder/tests/p2p_test.go +++ b/nodebuilder/tests/p2p_test.go @@ -46,7 +46,10 @@ func TestBridgeNodeAsBootstrapper(t *testing.T) { require.NoError(t, nd.Start(ctx)) assert.Equal(t, *addr, nd.Bootstrappers[0]) // ensure that node is actually connected to BN - assert.True(t, nd.Host.Network().Connectedness(addr.ID) == network.Connected) + client := getAdminClient(ctx, nd, t) + connectedness, err := client.P2P.Connectedness(ctx, addr.ID) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) } } @@ -102,15 +105,22 @@ func TestFullDiscoveryViaBootstrapper(t *testing.T) { for index := range nodes { require.NoError(t, nodes[index].Start(ctx)) assert.Equal(t, *bootstrapper, nodes[index].Bootstrappers[0]) - assert.True(t, nodes[index].Host.Network().Connectedness(bootstrapper.ID) == network.Connected) + // ensure that node is actually connected to BN + client := getAdminClient(ctx, nodes[index], t) + connectedness, err := client.P2P.Connectedness(ctx, bootstrapper.ID) + require.NoError(t, err) + 
assert.Equal(t, connectedness, network.Connected) } for { if ctx.Err() != nil { t.Fatal(ctx.Err()) } - if light.Host.Network().Connectedness(host.InfoFromHost(full.Host).ID) == network.Connected { - // LN discovered FN successfully and is now connected + // LN discovered FN successfully and is now connected + client := getAdminClient(ctx, light, t) + connectedness, err := client.P2P.Connectedness(ctx, host.InfoFromHost(full.Host).ID) + require.NoError(t, err) + if connectedness == network.Connected { break } } @@ -158,11 +168,19 @@ func TestRestartNodeDiscovery(t *testing.T) { for index := 0; index < numFulls; index++ { nodes[index] = sw.NewNodeWithConfig(node.Full, fullCfg, nodesConfig) require.NoError(t, nodes[index].Start(ctx)) - assert.True(t, nodes[index].Host.Network().Connectedness(bridgeAddr.ID) == network.Connected) + client := getAdminClient(ctx, nodes[index], t) + connectedness, err := client.P2P.Connectedness(ctx, bridgeAddr.ID) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) } // ensure FNs are connected to each other - require.True(t, nodes[0].Host.Network().Connectedness(nodes[1].Host.ID()) == network.Connected) + fullClient1 := getAdminClient(ctx, nodes[0], t) + fullClient2 := getAdminClient(ctx, nodes[1], t) + + connectedness, err := fullClient1.P2P.Connectedness(ctx, nodes[1].Host.ID()) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) // disconnect the FNs sw.Disconnect(t, nodes[0], nodes[1]) @@ -175,8 +193,13 @@ func TestRestartNodeDiscovery(t *testing.T) { // ensure that the FN with disabled discovery is discovered by both of the // running FNs that have discovery enabled - require.True(t, nodes[0].Host.Network().Connectedness(disabledDiscoveryFN.Host.ID()) == network.Connected) - require.True(t, nodes[1].Host.Network().Connectedness(disabledDiscoveryFN.Host.ID()) == network.Connected) + connectedness, err = fullClient1.P2P.Connectedness(ctx, disabledDiscoveryFN.Host.ID()) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) + + connectedness, err = fullClient2.P2P.Connectedness(ctx, disabledDiscoveryFN.Host.ID()) + require.NoError(t, err) + assert.Equal(t, connectedness, network.Connected) } func setTimeInterval(cfg *nodebuilder.Config, interval time.Duration) { diff --git a/nodebuilder/tests/reconstruct_test.go b/nodebuilder/tests/reconstruct_test.go index d8640c5249..3c2e8b1f83 100644 --- a/nodebuilder/tests/reconstruct_test.go +++ b/nodebuilder/tests/reconstruct_test.go @@ -53,10 +53,11 @@ func TestFullReconstructFromBridge(t *testing.T) { bridge := sw.NewBridgeNode() err := bridge.Start(ctx) require.NoError(t, err) + bridgeClient := getAdminClient(ctx, bridge, t) // TODO: This is required to avoid flakes coming from unfinished retry // mechanism for the same peer in go-header - _, err = bridge.HeaderServ.WaitForHeight(ctx, uint64(blocks)) + _, err = bridgeClient.Header.WaitForHeight(ctx, uint64(blocks)) require.NoError(t, err) cfg := nodebuilder.DefaultConfig(node.Full) @@ -65,17 +66,18 @@ func TestFullReconstructFromBridge(t *testing.T) { full := sw.NewNodeWithConfig(node.Full, cfg) err = full.Start(ctx) require.NoError(t, err) + fullClient := getAdminClient(ctx, full, t) errg, bctx := errgroup.WithContext(ctx) for i := 1; i <= blocks+1; i++ { i := i errg.Go(func() error { - h, err := full.HeaderServ.WaitForHeight(bctx, uint64(i)) + h, err := fullClient.Header.WaitForHeight(bctx, uint64(i)) if err != nil { return err } - return full.ShareServ.SharesAvailable(bctx, h.DAH) + return 
fullClient.Share.SharesAvailable(bctx, h.DAH) }) } require.NoError(t, <-fillDn) @@ -106,10 +108,11 @@ func TestFullReconstructFromFulls(t *testing.T) { sw.SetBootstrapper(t, bridge) require.NoError(t, bridge.Start(ctx)) + bridgeClient := getAdminClient(ctx, bridge, t) // TODO: This is required to avoid flakes coming from unfinished retry // mechanism for the same peer in go-header - _, err := bridge.HeaderServ.WaitForHeight(ctx, uint64(blocks)) + _, err := bridgeClient.Header.WaitForHeight(ctx, uint64(blocks)) require.NoError(t, err) lights1 := make([]*nodebuilder.Node, lnodes/2) @@ -175,6 +178,9 @@ func TestFullReconstructFromFulls(t *testing.T) { require.NoError(t, full1.Start(ctx)) require.NoError(t, full2.Start(ctx)) + fullClient1 := getAdminClient(ctx, full1, t) + fullClient2 := getAdminClient(ctx, full2, t) + // Form topology for i := 0; i < lnodes/2; i++ { // Separate light nodes into two subnetworks @@ -198,17 +204,17 @@ func TestFullReconstructFromFulls(t *testing.T) { sw.Disconnect(t, full1, bridge) sw.Disconnect(t, full2, bridge) - h, err := full1.HeaderServ.WaitForHeight(ctx, uint64(10+blocks-1)) + h, err := fullClient1.Header.WaitForHeight(ctx, uint64(10+blocks-1)) require.NoError(t, err) // Ensure that the full nodes cannot reconstruct before being connected to each other ctxErr, cancelErr := context.WithTimeout(ctx, time.Second*30) errg, errCtx = errgroup.WithContext(ctxErr) errg.Go(func() error { - return full1.ShareServ.SharesAvailable(errCtx, h.DAH) + return fullClient1.Share.SharesAvailable(errCtx, h.DAH) }) errg.Go(func() error { - return full2.ShareServ.SharesAvailable(errCtx, h.DAH) + return fullClient2.Share.SharesAvailable(errCtx, h.DAH) }) require.Error(t, errg.Wait()) cancelErr() @@ -218,13 +224,13 @@ func TestFullReconstructFromFulls(t *testing.T) { errg, bctx := errgroup.WithContext(ctx) for i := 10; i < blocks+11; i++ { - h, err := full1.HeaderServ.WaitForHeight(bctx, uint64(i)) + h, err := fullClient1.Header.WaitForHeight(bctx, uint64(i)) require.NoError(t, err) errg.Go(func() error { - return full1.ShareServ.SharesAvailable(bctx, h.DAH) + return fullClient1.Share.SharesAvailable(bctx, h.DAH) }) errg.Go(func() error { - return full2.ShareServ.SharesAvailable(bctx, h.DAH) + return fullClient2.Share.SharesAvailable(bctx, h.DAH) }) } @@ -278,12 +284,14 @@ func TestFullReconstructFromLights(t *testing.T) { } bootstrapper := sw.NewNodeWithConfig(node.Full, cfg) require.NoError(t, bootstrapper.Start(ctx)) - require.NoError(t, bridge.Start(ctx)) bootstrapperAddr := host.InfoFromHost(bootstrapper.Host) + require.NoError(t, bridge.Start(ctx)) + bridgeClient := getAdminClient(ctx, bridge, t) + // TODO: This is required to avoid flakes coming from unfinished retry // mechanism for the same peer in go-header - _, err = bridge.HeaderServ.WaitForHeight(ctx, uint64(blocks)) + _, err = bridgeClient.Header.WaitForHeight(ctx, uint64(blocks)) require.NoError(t, err) cfg = nodebuilder.DefaultConfig(node.Full) @@ -313,8 +321,11 @@ func TestFullReconstructFromLights(t *testing.T) { return light.Start(errCtx) }) } + require.NoError(t, errg.Wait()) require.NoError(t, full.Start(ctx)) + fullClient := getAdminClient(ctx, full, t) + for i := 0; i < lnodes; i++ { select { case <-ctx.Done(): @@ -328,12 +339,12 @@ func TestFullReconstructFromLights(t *testing.T) { for i := 1; i <= blocks+1; i++ { i := i errg.Go(func() error { - h, err := full.HeaderServ.WaitForHeight(bctx, uint64(i)) + h, err := fullClient.Header.WaitForHeight(bctx, uint64(i)) if err != nil { return err } - return 
full.ShareServ.SharesAvailable(bctx, h.DAH) + return fullClient.Share.SharesAvailable(bctx, h.DAH) }) } require.NoError(t, <-fillDn) diff --git a/nodebuilder/tests/sync_test.go b/nodebuilder/tests/sync_test.go index 65db1332ff..0bb0e1c757 100644 --- a/nodebuilder/tests/sync_test.go +++ b/nodebuilder/tests/sync_test.go @@ -56,8 +56,9 @@ func TestSyncAgainstBridge_NonEmptyChain(t *testing.T) { // start bridge and wait for it to sync to 20 err := bridge.Start(ctx) require.NoError(t, err) + bridgeClient := getAdminClient(ctx, bridge, t) - h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) + h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) @@ -68,16 +69,17 @@ func TestSyncAgainstBridge_NonEmptyChain(t *testing.T) { // start light node and wait for it to sync 20 blocks err = light.Start(ctx) require.NoError(t, err) - h, err = light.HeaderServ.WaitForHeight(ctx, numBlocks) + lightClient := getAdminClient(ctx, light, t) + h, err = lightClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) // check that the light node has also sampled over the block at height 20 - err = light.ShareServ.SharesAvailable(ctx, h.DAH) + err = lightClient.Share.SharesAvailable(ctx, h.DAH) assert.NoError(t, err) // wait until the entire chain (up to network head) has been sampled - err = light.DASer.WaitCatchUp(ctx) + err = lightClient.DAS.WaitCatchUp(ctx) require.NoError(t, err) }) @@ -87,16 +89,17 @@ func TestSyncAgainstBridge_NonEmptyChain(t *testing.T) { // let full node sync 20 blocks err = full.Start(ctx) require.NoError(t, err) - h, err = full.HeaderServ.WaitForHeight(ctx, numBlocks) + fullClient := getAdminClient(ctx, full, t) + h, err = fullClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) // check to ensure the full node can sync the 20th block's data - err = full.ShareServ.SharesAvailable(ctx, h.DAH) + err = fullClient.Share.SharesAvailable(ctx, h.DAH) assert.NoError(t, err) // wait for full node to sync up the blocks from genesis -> network head. 
- err = full.DASer.WaitCatchUp(ctx) + err = fullClient.DAS.WaitCatchUp(ctx) require.NoError(t, err) }) @@ -144,7 +147,8 @@ func TestSyncAgainstBridge_EmptyChain(t *testing.T) { // start bridge and wait for it to sync to 20 err := bridge.Start(ctx) require.NoError(t, err) - h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) + bridgeClient := getAdminClient(ctx, bridge, t) + h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) @@ -155,16 +159,17 @@ func TestSyncAgainstBridge_EmptyChain(t *testing.T) { // start light node and wait for it to sync 20 blocks err = light.Start(ctx) require.NoError(t, err) - h, err = light.HeaderServ.WaitForHeight(ctx, numBlocks) + lightClient := getAdminClient(ctx, light, t) + h, err = lightClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) // check that the light node has also sampled over the block at height 20 - err = light.ShareServ.SharesAvailable(ctx, h.DAH) + err = lightClient.Share.SharesAvailable(ctx, h.DAH) assert.NoError(t, err) // wait until the entire chain (up to network head) has been sampled - err = light.DASer.WaitCatchUp(ctx) + err = lightClient.DAS.WaitCatchUp(ctx) require.NoError(t, err) }) @@ -174,16 +179,17 @@ func TestSyncAgainstBridge_EmptyChain(t *testing.T) { // let full node sync 20 blocks err = full.Start(ctx) require.NoError(t, err) - h, err = full.HeaderServ.WaitForHeight(ctx, numBlocks) + fullClient := getAdminClient(ctx, full, t) + h, err = fullClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) // check to ensure the full node can sync the 20th block's data - err = full.ShareServ.SharesAvailable(ctx, h.DAH) + err = fullClient.Share.SharesAvailable(ctx, h.DAH) assert.NoError(t, err) // wait for full node to sync up the blocks from genesis -> network head. 
- err = full.DASer.WaitCatchUp(ctx) + err = fullClient.DAS.WaitCatchUp(ctx) require.NoError(t, err) }) } @@ -219,7 +225,8 @@ func TestSyncStartStopLightWithBridge(t *testing.T) { // and let bridge node sync up 20 blocks err := bridge.Start(ctx) require.NoError(t, err) - h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) + bridgeClient := getAdminClient(ctx, bridge, t) + h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) @@ -228,7 +235,8 @@ func TestSyncStartStopLightWithBridge(t *testing.T) { // start light node and let it sync to 20 err = light.Start(ctx) require.NoError(t, err) - h, err = light.HeaderServ.WaitForHeight(ctx, numBlocks) + lightClient := getAdminClient(ctx, light, t) + h, err = lightClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) require.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) @@ -239,7 +247,7 @@ func TestSyncStartStopLightWithBridge(t *testing.T) { // ensure when light node comes back up, it can sync the remainder of the chain it // missed while sleeping - h, err = light.HeaderServ.WaitForHeight(ctx, 40) + h, err = lightClient.Header.WaitForHeight(ctx, 40) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, 40)) @@ -284,7 +292,8 @@ func TestSyncLightAgainstFull(t *testing.T) { // start a bridge node and wait for it to sync up 20 blocks err := bridge.Start(ctx) require.NoError(t, err) - h, err := bridge.HeaderServ.WaitForHeight(ctx, numBlocks) + bridgeClient := getAdminClient(ctx, bridge, t) + h, err := bridgeClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) @@ -293,9 +302,10 @@ func TestSyncLightAgainstFull(t *testing.T) { // start FN and wait for it to sync up to head of BN err = full.Start(ctx) require.NoError(t, err) - bridgeHead, err := bridge.HeaderServ.LocalHead(ctx) + fullClient := getAdminClient(ctx, full, t) + bridgeHead, err := bridgeClient.Header.LocalHead(ctx) require.NoError(t, err) - _, err = full.HeaderServ.WaitForHeight(ctx, bridgeHead.Height()) + _, err = fullClient.Header.WaitForHeight(ctx, bridgeHead.Height()) require.NoError(t, err) assert.EqualValues(t, h.Commit.BlockID.Hash, sw.GetCoreBlockHashByHeight(ctx, numBlocks)) @@ -315,9 +325,10 @@ func TestSyncLightAgainstFull(t *testing.T) { // start LN and wait for it to sync up to network head against the head of the FN err = light.Start(ctx) require.NoError(t, err) - fullHead, err := full.HeaderServ.LocalHead(ctx) + lightClient := getAdminClient(ctx, light, t) + fullHead, err := fullClient.Header.LocalHead(ctx) require.NoError(t, err) - _, err = light.HeaderServ.WaitForHeight(ctx, fullHead.Height()) + _, err = lightClient.Header.WaitForHeight(ctx, fullHead.Height()) require.NoError(t, err) // wait for the core block filling process to exit @@ -359,7 +370,8 @@ func TestSyncLightWithTrustedPeers(t *testing.T) { // let it sync to network head err := bridge.Start(ctx) require.NoError(t, err) - _, err = bridge.HeaderServ.WaitForHeight(ctx, numBlocks) + bridgeClient := getAdminClient(ctx, bridge, t) + _, err = bridgeClient.Header.WaitForHeight(ctx, numBlocks) require.NoError(t, err) // create a FN with BN as trusted peer @@ -368,7 +380,8 @@ func TestSyncLightWithTrustedPeers(t *testing.T) { // let FN sync to network head err = full.Start(ctx) require.NoError(t, err) 
- err = full.HeaderServ.SyncWait(ctx) + fullClient := getAdminClient(ctx, full, t) + err = fullClient.Header.SyncWait(ctx) require.NoError(t, err) // add full node as a bootstrapper for the suite @@ -380,7 +393,8 @@ func TestSyncLightWithTrustedPeers(t *testing.T) { // let LN sync to network head err = light.Start(ctx) require.NoError(t, err) - err = light.HeaderServ.SyncWait(ctx) + lightClient := getAdminClient(ctx, light, t) + err = lightClient.Header.SyncWait(ctx) require.NoError(t, err) // wait for the core block filling process to exit