Skip to content

Commit

Permalink
Block production summary in 100 node (#1876)
Browse files Browse the repository at this point in the history
![image](https://user-images.githubusercontent.com/13259400/71045504-0108a000-20ea-11ea-81ef-b298ce1d1e40.png)

Output is roughly as above. Each line in each txt file above is in the format:
block_hash block_height approvals is_chunk_included_all_in_current_height chunk_included_height
  • Loading branch information
ailisp authored Feb 24, 2020
1 parent fc4e268 commit 60f2da2
Show file tree
Hide file tree
Showing 16 changed files with 688 additions and 173 deletions.
3 changes: 3 additions & 0 deletions near/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ use near_jsonrpc::start_http;
use near_network::{NetworkRecipient, PeerManagerActor};
use near_store::create_store;
use near_telemetry::TelemetryActor;
use tracing::trace;

pub use crate::config::{init_configs, load_config, load_test_config, NearConfig, NEAR_BASE};
pub use crate::runtime::NightshadeRuntime;
Expand Down Expand Up @@ -110,5 +111,7 @@ pub fn start_with_config(

network_adapter.set_recipient(network_actor.recipient());

trace!(target: "diagnostic", key="log", "Starting NEAR node with diagnostic activated");

(client_actor, view_client)
}
5 changes: 2 additions & 3 deletions pytest/Pipfile
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,8 @@ tornado = "*"
requests = "*"
ed25519 = "*"
base58 = "*"
google-api-python-client = "*"
six = "*" # requires by gcloud.py
python-rc = "==0.1.6"
python-rc = "==0.1.7"
tqdm = "*"
deepdiff = "*"

[dev-packages]
Expand Down
117 changes: 18 additions & 99 deletions pytest/Pipfile.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

142 changes: 89 additions & 53 deletions pytest/lib/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,21 @@ def get_account(self, acc, finality='optimistic'):

def get_block(self, block_hash):
return self.json_rpc('block', [block_hash])

def validators(self):
return set(map(lambda v: v['account_id'], self.get_status()['validators']))


class RpcNode(BaseNode):
    """A running node that is interacted with only through RPC queries
    (no local process or node_dir is managed)."""

    def __init__(self, host, rpc_port):
        # Python 3 zero-argument super() instead of super(RpcNode, self).
        super().__init__()
        self.host = host
        self.rpc_port = rpc_port

    def rpc_addr(self):
        """Return the (host, port) pair used for JSON-RPC requests."""
        return (self.host, self.rpc_port)


def get_changes(self, block_hash, state_changes_request):
return self.json_rpc('changes', [block_hash, state_changes_request])
Expand Down Expand Up @@ -208,30 +223,43 @@ class BotoNode(BaseNode):


class GCloudNode(BaseNode):
def __init__(self, instance_name, zone, node_dir, binary):
self.instance_name = instance_name
self.port = 24567
self.rpc_port = 3030
self.node_dir = node_dir
self.machine = gcloud.create(
name=instance_name,
machine_type='n1-standard-2',
disk_size='50G',
image_project='gce-uefi-images',
image_family='ubuntu-1804-lts',
zone=zone,
firewall_allows=['tcp:3030', 'tcp:24567'],
min_cpu_platform='Intel Skylake',
preemptible=False,
)
self.ip = self.machine.ip
self._upload_config_files(node_dir)
self._download_binary(binary)
with remote_nodes_lock:
global cleanup_remote_nodes_atexit_registered
if not cleanup_remote_nodes_atexit_registered:
atexit.register(atexit_cleanup_remote)
cleanup_remote_nodes_atexit_registered = True
def __init__(self, *args):
if len(args) == 1:
# Get existing instance assume it's ready to run
name = args[0]
self.instance_name = name
self.port = 24567
self.rpc_port = 3030
self.machine = gcloud.get(name)
self.ip = self.machine.ip
elif len(args) == 4:
# Create new instance from scratch
instance_name, zone, node_dir, binary = args
self.instance_name = instance_name
self.port = 24567
self.rpc_port = 3030
self.node_dir = node_dir
self.machine = gcloud.create(
name=instance_name,
machine_type='n1-standard-2',
disk_size='50G',
image_project='gce-uefi-images',
image_family='ubuntu-1804-lts',
zone=zone,
firewall_allows=['tcp:3030', 'tcp:24567'],
min_cpu_platform='Intel Skylake',
preemptible=False,
)
self.ip = self.machine.ip
self._upload_config_files(node_dir)
self._download_binary(binary)
with remote_nodes_lock:
global cleanup_remote_nodes_atexit_registered
if not cleanup_remote_nodes_atexit_registered:
atexit.register(atexit_cleanup_remote)
cleanup_remote_nodes_atexit_registered = True
else:
raise Exception()


def _upload_config_files(self, node_dir):
Expand Down Expand Up @@ -347,40 +375,48 @@ def init_cluster(num_nodes, num_observers, num_shards, config, genesis_config_ch

# apply config changes
for i, node_dir in enumerate(node_dirs):
# apply genesis_config.json changes
fname = os.path.join(node_dir, 'genesis.json')
with open(fname) as f:
genesis_config = json.loads(f.read())
for change in genesis_config_changes:
cur = genesis_config
for s in change[:-2]:
cur = cur[s]
assert change[-2] in cur
cur[change[-2]] = change[-1]
with open(fname, 'w') as f:
f.write(json.dumps(genesis_config, indent=2))

# apply config.json changes
fname = os.path.join(node_dir, 'config.json')
with open(fname) as f:
config_json = json.loads(f.read())

apply_genesis_changes(node_dir, genesis_config_changes)
if i in client_config_changes:
for k, v in client_config_changes[i].items():
assert k in config_json
if isinstance(v, dict):
for key, value in v.items():
assert key in config_json[k]
config_json[k][key] = value
else:
config_json[k] = v

with open(fname, 'w') as f:
f.write(json.dumps(config_json, indent=2))
client_config_change = client_config_changes[i]
apply_config_changes(node_dir, client_config_change)

return near_root, node_dirs


def apply_genesis_changes(node_dir, genesis_config_changes):
    """Apply a list of changes to a node's genesis.json in place.

    Each change is a sequence: the leading elements are a path of keys
    into the genesis config, the second-to-last element is the key to set
    (it must already exist), and the last element is the new value.
    """
    fname = os.path.join(node_dir, 'genesis.json')
    # json.load / json.dump instead of manual read()/dumps round-trips.
    with open(fname) as f:
        genesis_config = json.load(f)
    for change in genesis_config_changes:
        cur = genesis_config
        # Walk down to the dict that holds the key to modify.
        for key in change[:-2]:
            cur = cur[key]
        assert change[-2] in cur, change
        cur[change[-2]] = change[-1]
    with open(fname, 'w') as f:
        json.dump(genesis_config, f, indent=2)


def apply_config_changes(node_dir, client_config_change):
    """Apply key/value overrides to a node's config.json in place.

    Values that are dicts are merged one level deep; every key (and
    nested key) must already exist in the current config.
    """
    fname = os.path.join(node_dir, 'config.json')
    # json.load / json.dump instead of manual read()/dumps round-trips.
    with open(fname) as f:
        config_json = json.load(f)

    for k, v in client_config_change.items():
        assert k in config_json, k
        if isinstance(v, dict):
            # One-level merge: override individual nested keys.
            for key, value in v.items():
                assert key in config_json[k], key
                config_json[k][key] = value
        else:
            config_json[k] = v

    with open(fname, 'w') as f:
        json.dump(config_json, f, indent=2)


def start_cluster(num_nodes, num_observers, num_shards, config, genesis_config_changes, client_config_changes):
if not config:
config = load_config()
Expand Down
Loading

0 comments on commit 60f2da2

Please sign in to comment.