diff --git a/bittensor/_cli/__init__.py b/bittensor/_cli/__init__.py
index bd96ea98b9..a291ea3f2f 100644
--- a/bittensor/_cli/__init__.py
+++ b/bittensor/_cli/__init__.py
@@ -25,6 +25,7 @@
from . import cli_impl
from .commands import *
from typing import List, Optional
+from .naka_cli_impl import CLI as naka_CLI
console = bittensor.__console__
# Turn off rich console locals trace.
@@ -50,7 +51,13 @@ def __new__(
if config == None:
config = cli.config(args)
cli.check_config( config )
- return cli_impl.CLI( config = config)
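+        # Route to the nakamoto CLI implementation when the configured network is 'nakamoto'; otherwise use the finney CLI.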
+        network = bittensor.defaults.subtensor.network
+        if config.subtensor:
+            network = config.subtensor.get('network', bittensor.defaults.subtensor.network)
+
+        if network == 'finney':
+            return cli_impl.CLI( config = config)
+        elif network == 'nakamoto':
+            return naka_CLI(config=config)
@staticmethod
def config(args: List[str]) -> 'bittensor.config':
@@ -80,6 +87,7 @@ def config(args: List[str]) -> 'bittensor.config':
MetagraphCommand.add_args( cmd_parsers )
SetWeightsCommand.add_args( cmd_parsers )
NewColdkeyCommand.add_args( cmd_parsers )
+ NewHotkeyCommand.add_args( cmd_parsers )
ListSubnetsCommand.add_args( cmd_parsers )
RegenHotkeyCommand.add_args( cmd_parsers )
RegenColdkeyCommand.add_args( cmd_parsers )
diff --git a/bittensor/_cli/commands/list.py b/bittensor/_cli/commands/list.py
index 23bbf0b446..a349112f03 100644
--- a/bittensor/_cli/commands/list.py
+++ b/bittensor/_cli/commands/list.py
@@ -85,4 +85,5 @@ def add_args( parser: argparse.ArgumentParser ):
default=False,
)
list_parser.add_argument( '--no_version_checking', action='store_true', help='''Set false to stop cli version checking''', default = False )
- bittensor.wallet.add_args( list_parser )
\ No newline at end of file
+ bittensor.wallet.add_args( list_parser )
+ bittensor.subtensor.add_args( list_parser )
\ No newline at end of file
diff --git a/bittensor/_cli/commands/misc.py b/bittensor/_cli/commands/misc.py
index 3df6367a27..ab99ce811e 100644
--- a/bittensor/_cli/commands/misc.py
+++ b/bittensor/_cli/commands/misc.py
@@ -57,6 +57,7 @@ def add_args( parser: argparse.ArgumentParser ):
default='None',
)
help_parser.add_argument( '--no_version_checking', action='store_true', help='''Set false to stop cli version checking''', default = False )
+ bittensor.subtensor.add_args( help_parser )
class UpdateCommand:
@staticmethod
@@ -86,6 +87,7 @@ def add_args( parser: argparse.ArgumentParser ):
default=False,
)
update_parser.add_argument( '--no_version_checking', action='store_true', help='''Set false to stop cli version checking''', default = False )
+ bittensor.subtensor.add_args( update_parser )
class ListSubnetsCommand:
@staticmethod
@@ -96,7 +98,7 @@ def run (cli):
rows = []
total_neurons = 0
-
+
for subnet in subnets:
total_neurons += subnet.max_n
rows.append((
@@ -109,7 +111,7 @@ def run (cli):
str(subnet.validator_sequence_length),
str(subnet.tempo),
str(subnet.modality),
- str(list(subnet.connection_requirements.keys())),
+ str([cr[1] for cr in subnet.connection_requirements]),
str(subnet.emission_value),
))
diff --git a/bittensor/_cli/commands/query.py b/bittensor/_cli/commands/query.py
index de9aff3d82..fe1a6575d2 100644
--- a/bittensor/_cli/commands/query.py
+++ b/bittensor/_cli/commands/query.py
@@ -28,8 +28,12 @@ def run (cli):
subtensor = bittensor.subtensor( config = cli.config )
# Verify subnet exists
+ if not hasattr(cli.config, 'netuid'):
+        bittensor.__console__.print("[red]Please specify the subnet with --netuid.[/red]")
+ sys.exit(1)
+
if not subtensor.subnet_exists( netuid = cli.config.netuid ):
- bittensor.__console__.print(f"[red]Subnet {cli.config.netuid} does not exist[/red]")
+ bittensor.__console__.print(f"[red]Subnet {cli.config.netuid} does not exist.[/red]")
sys.exit(1)
dendrite = bittensor.dendrite( wallet = wallet )
@@ -37,14 +41,25 @@ def run (cli):
for uid in cli.config.uids:
neuron = subtensor.neuron_for_uid( uid = uid, netuid = cli.config.netuid )
endpoint = bittensor.endpoint.from_neuron( neuron )
- _, c, t = dendrite.forward_text( endpoints = endpoint, inputs = 'hello world')
- latency = "{}".format(t.tolist()[0]) if c.tolist()[0] == 1 else 'N/A'
- bittensor.__console__.print("\tUid: [bold white]{}[/bold white]\n\tLatency: [bold white]{}[/bold white]\n\tCode: [bold {}]{}[/bold {}]\n\n".format(uid, latency, bittensor.utils.codes.code_to_loguru_color( c.item() ), bittensor.utils.codes.code_to_string( c.item() ), bittensor.utils.codes.code_to_loguru_color( c.item() )), highlight=True)
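+            # Query a single TextCausalLMNext synapse and record the return code and round-trip latency for each uid.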
+ _, c, t = dendrite.text( endpoints = endpoint, inputs = 'hello world', synapses = [bittensor.synapse.TextCausalLMNext()])
+ latency = "{}".format(t[0].tolist()[0]) if c[0].tolist()[0] == 1 else 'N/A'
+ bittensor.__console__.print("\tUid: [bold white]{}[/bold white]\n\tLatency: [bold white]{}[/bold white]\n\tCode: [bold {}]{}[/bold {}]\n\n".format(
+ uid,
+ latency,
+ bittensor.utils.codes.code_to_loguru_color( c[0].item() ),
+ bittensor.utils.codes.code_to_string( c[0].item() ),
+ bittensor.utils.codes.code_to_loguru_color( c[0].item() )
+ ), highlight=True)
stats[uid] = latency
print (stats)
+ dendrite.__del__()
@staticmethod
def check_config( config: 'bittensor.Config' ):
+
+ if config.subtensor.get('network') == bittensor.defaults.subtensor.network and not config.no_prompt:
+ config.subtensor.network = Prompt.ask("Enter subtensor network", choices=bittensor.__networks__, default = bittensor.defaults.subtensor.network)
+
if config.wallet.get('name') == bittensor.defaults.wallet.name and not config.no_prompt:
wallet_name = Prompt.ask("Enter wallet name", default = bittensor.defaults.wallet.name)
config.wallet.name = str(wallet_name)
@@ -85,6 +100,12 @@ def add_args( parser: argparse.ArgumentParser ):
help='''Set true to avoid prompting the user.''',
default=False,
)
+ query_parser.add_argument(
+ '--netuid',
+ type=int,
+ help='netuid for subnet to serve this neuron on',
+ default=argparse.SUPPRESS,
+ )
query_parser.add_argument( '--no_version_checking', action='store_true', help='''Set false to stop cli version checking''', default = False )
bittensor.wallet.add_args( query_parser )
bittensor.subtensor.add_args( query_parser )
diff --git a/bittensor/_cli/commands/utils.py b/bittensor/_cli/commands/utils.py
index 7c7f2eb06e..d59691b0a1 100644
--- a/bittensor/_cli/commands/utils.py
+++ b/bittensor/_cli/commands/utils.py
@@ -36,28 +36,28 @@ def check_choice( self, value: str ) -> bool:
def check_netuid_set( config: 'bittensor.Config', subtensor: 'bittensor.Subtensor', allow_none: bool = False ):
-
- all_netuids = [str(netuid) for netuid in subtensor.get_subnets()]
- if len(all_netuids) == 0:
- console.print(":cross_mark:[red]There are no open networks.[/red]")
- sys.exit()
-
- # Make sure netuid is set.
- if config.get('netuid', 'notset') == 'notset':
- if not config.no_prompt:
- netuid = IntListPrompt.ask("Enter netuid", choices=all_netuids, default=str(all_netuids[0]))
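+    # netuids only exist on finney; nakamoto has a single network, so there is nothing to check or prompt for there.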
+    if subtensor.network == 'finney':
+ all_netuids = [str(netuid) for netuid in subtensor.get_subnets()]
+ if len(all_netuids) == 0:
+ console.print(":cross_mark:[red]There are no open networks.[/red]")
+ sys.exit()
+
+ # Make sure netuid is set.
+ if config.get('netuid', 'notset') == 'notset':
+ if not config.no_prompt:
+ netuid = IntListPrompt.ask("Enter netuid", choices=all_netuids, default=str(all_netuids[0]))
+ else:
+ netuid = str(bittensor.defaults.netuid) if not allow_none else 'None'
else:
- netuid = str(bittensor.defaults.netuid) if not allow_none else 'None'
- else:
- netuid = config.netuid
-
- if isinstance(netuid, str) and netuid.lower() in ['none'] and allow_none:
- config.netuid = None
- else:
- try:
- config.netuid = int(netuid)
- except ValueError:
- raise ValueError('netuid must be an integer or "None" (if applicable)')
+ netuid = config.netuid
+
+ if isinstance(netuid, str) and netuid.lower() in ['none'] and allow_none:
+ config.netuid = None
+ else:
+ try:
+ config.netuid = int(netuid)
+ except ValueError:
+ raise ValueError('netuid must be an integer or "None" (if applicable)')
def check_for_cuda_reg_config( config: 'bittensor.Config' ) -> None:
diff --git a/bittensor/_cli/commands/wallets.py b/bittensor/_cli/commands/wallets.py
index ffa8e16eb5..8cb9fc36c3 100644
--- a/bittensor/_cli/commands/wallets.py
+++ b/bittensor/_cli/commands/wallets.py
@@ -118,6 +118,7 @@ def add_args( parser: argparse.ArgumentParser ):
help='''Overwrite the old coldkey with the newly generated coldkey'''
)
bittensor.wallet.add_args( regen_coldkey_parser )
+ bittensor.subtensor.add_args( regen_coldkey_parser )
class RegenColdkeypubCommand:
@@ -180,6 +181,7 @@ def add_args( parser: argparse.ArgumentParser ):
help='''Overwrite the old coldkeypub file with the newly generated coldkeypub'''
)
bittensor.wallet.add_args( regen_coldkeypub_parser )
+ bittensor.subtensor.add_args( regen_coldkeypub_parser )
class RegenHotkeyCommand:
@@ -282,6 +284,7 @@ def add_args( parser: argparse.ArgumentParser ):
help='''Overwrite the old hotkey with the newly generated hotkey'''
)
bittensor.wallet.add_args( regen_hotkey_parser )
+ bittensor.subtensor.add_args( regen_hotkey_parser )
@@ -339,6 +342,7 @@ def add_args( parser: argparse.ArgumentParser ):
help='''Overwrite the old hotkey with the newly generated hotkey'''
)
bittensor.wallet.add_args( new_hotkey_parser )
+ bittensor.subtensor.add_args( new_hotkey_parser )
class NewColdkeyCommand:
@@ -393,4 +397,5 @@ def add_args( parser: argparse.ArgumentParser ):
default=False,
help='''Overwrite the old coldkey with the newly generated coldkey'''
)
- bittensor.wallet.add_args( new_coldkey_parser )
\ No newline at end of file
+ bittensor.wallet.add_args( new_coldkey_parser )
+ bittensor.subtensor.add_args( new_coldkey_parser )
\ No newline at end of file
diff --git a/bittensor/_cli/naka_cli_impl.py b/bittensor/_cli/naka_cli_impl.py
new file mode 100644
index 0000000000..123033599d
--- /dev/null
+++ b/bittensor/_cli/naka_cli_impl.py
@@ -0,0 +1,901 @@
+# The MIT License (MIT)
+# Copyright © 2021 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import os
+import sys
+from typing import List, Union, Optional
+
+from cachetools import Cache
+
+import bittensor
+from bittensor.utils.balance import Balance
+from fuzzywuzzy import fuzz
+from rich import print
+from rich.prompt import Confirm
+from rich.table import Table
+from rich.tree import Tree
+from tqdm import tqdm
+
+
+class CLI:
+ """
+ Implementation of the CLI class, which handles the coldkey, hotkey and money transfer
+ """
+ def __init__(self, config: 'bittensor.Config' ):
+        r""" Initializes a bittensor.CLI object.
+ Args:
+ config (:obj:`bittensor.Config`, `required`):
+ bittensor.cli.config()
+ """
+ if config.get('no_version_checking') != None and not config.no_version_checking:
+ try:
+ bittensor.utils.version_checking()
+ except:
+ raise RuntimeError("To avoid internet based version checking pass --no_version_checking while running the CLI.")
+ self.config = config
+
+ def run ( self ):
+ """ Execute the command from config
+ """
+ if self.config.command == "run":
+ self.run_miner ()
+ elif self.config.command == "transfer":
+ self.transfer ()
+ elif self.config.command == "register":
+ self.register()
+ elif self.config.command == "unstake":
+ self.unstake()
+ elif self.config.command == "stake":
+ self.stake()
+ elif self.config.command == "overview":
+ self.overview()
+ elif self.config.command == "list":
+ self.list()
+ elif self.config.command == "new_coldkey":
+ self.create_new_coldkey()
+ elif self.config.command == "new_hotkey":
+ self.create_new_hotkey()
+ elif self.config.command == "regen_coldkey":
+ self.regen_coldkey()
+ elif self.config.command == "regen_coldkeypub":
+ self.regen_coldkeypub()
+ elif self.config.command == "regen_hotkey":
+ self.regen_hotkey()
+ elif self.config.command == "metagraph":
+ self.metagraph()
+ elif self.config.command == "weights":
+ self.weights()
+ elif self.config.command == "set_weights":
+ self.set_weights()
+ elif self.config.command == "inspect":
+ self.inspect()
+ elif self.config.command == "query":
+ self.query()
+ elif self.config.command == "help":
+ self.help()
+ elif self.config.command == 'update':
+ self.update()
+
+ def create_new_coldkey ( self ):
+ r""" Creates a new coldkey under this wallet.
+ """
+ wallet = bittensor.wallet(config = self.config)
+ wallet.create_new_coldkey( n_words = self.config.n_words, use_password = self.config.use_password, overwrite = self.config.overwrite_coldkey)
+
+ def create_new_hotkey ( self ):
+        r""" Creates a new hotkey under this wallet.
+ """
+ wallet = bittensor.wallet(config = self.config)
+ wallet.create_new_hotkey( n_words = self.config.n_words, use_password = self.config.use_password, overwrite = self.config.overwrite_hotkey)
+
+ def regen_coldkey ( self ):
+        r""" Regenerates a coldkey under this wallet.
+ """
+ wallet = bittensor.wallet(config = self.config)
+ wallet.regenerate_coldkey( mnemonic = self.config.mnemonic, seed = self.config.seed, use_password = self.config.use_password, overwrite = self.config.overwrite_coldkey )
+
+ def regen_coldkeypub ( self ):
+        r""" Regenerates a coldkeypub under this wallet.
+ """
+ wallet = bittensor.wallet(config = self.config)
+ wallet.regenerate_coldkeypub( ss58_address=self.config.get('ss58_address'), public_key=self.config.get('public_key_hex'), overwrite = self.config.overwrite_coldkeypub )
+
+ def regen_hotkey ( self ):
+        r""" Regenerates a hotkey under this wallet.
+ """
+ wallet = bittensor.wallet(config = self.config)
+ wallet.regenerate_hotkey( mnemonic = self.config.mnemonic, seed=self.config.seed, use_password = self.config.use_password, overwrite = self.config.overwrite_hotkey)
+
+ def query ( self ):
+ r""" Query an endpoint and get query time.
+ """
+ wallet = bittensor.wallet(config = self.config)
+ subtensor = bittensor.subtensor( config = self.config )
+ dendrite = bittensor.dendrite( wallet = wallet )
+ stats = {}
+ for uid in self.config.uids:
+ neuron = subtensor.neuron_for_uid( uid )
+ endpoint = bittensor.endpoint.from_neuron( neuron )
+ _, c, t = dendrite.text( endpoints = endpoint, inputs = 'hello world', synapses = [bittensor.synapse.TextCausalLMNext()])
+ latency = "{}".format(t[0].tolist()[0]) if c[0].tolist()[0] == 1 else 'N/A'
+ bittensor.__console__.print("\tUid: [bold white]{}[/bold white]\n\tLatency: [bold white]{}[/bold white]\n\tCode: [bold {}]{}[/bold {}]\n\n".format(
+ uid,
+ latency,
+ bittensor.utils.codes.code_to_loguru_color( c[0].item() ),
+ bittensor.utils.codes.code_to_string( c[0].item() ),
+ bittensor.utils.codes.code_to_loguru_color( c[0].item() )), highlight=True)
+ stats[uid] = latency
+ print (stats)
+ dendrite.__del__()
+
+ def inspect ( self ):
+ r""" Inspect a cold, hot pair.
+ """
+ wallet = bittensor.wallet(config = self.config)
+ subtensor = bittensor.subtensor( config = self.config )
+ dendrite = bittensor.dendrite( wallet = wallet )
+
+
+ with bittensor.__console__.status(":satellite: Looking up account on: [white]{}[/white] ...".format(self.config.subtensor.get('network', bittensor.defaults.subtensor.network))):
+
+ if self.config.wallet.get('hotkey', bittensor.defaults.wallet.hotkey) is None:
+ # If no hotkey is provided, inspect just the coldkey
+ wallet.coldkeypub
+ cold_balance = wallet.get_balance( subtensor = subtensor )
+ bittensor.__console__.print("\n[bold white]{}[/bold white]:\n {}[bold white]{}[/bold white]\n {} {}\n".format( wallet, "coldkey:".ljust(15), wallet.coldkeypub.ss58_address, " balance:".ljust(15), cold_balance.__rich__()), highlight=True)
+
+ else:
+ wallet.hotkey
+ wallet.coldkeypub
+ neuron = subtensor.neuron_for_pubkey( ss58_hotkey = wallet.hotkey.ss58_address )
+ endpoint = bittensor.endpoint.from_neuron( neuron )
+ if neuron.is_null:
+ registered = '[bold white]No[/bold white]'
+ stake = bittensor.Balance.from_tao( 0 )
+ emission = bittensor.Balance.from_rao( 0 )
+ latency = 'N/A'
+ else:
+ registered = '[bold white]Yes[/bold white]'
+ stake = bittensor.Balance.from_tao( neuron.stake )
+ emission = bittensor.Balance.from_rao( neuron.emission * 1000000000 )
+ synapses = [bittensor.synapse.TextLastHiddenState()]
+ _, c, t = dendrite.text( endpoints = endpoint, inputs = 'hello world', synapses=synapses)
+ latency = "{}".format((t[0]).tolist()[0]) if (c[0]).tolist()[0] == 1 else 'N/A'
+
+ cold_balance = wallet.get_balance( subtensor = subtensor )
+ bittensor.__console__.print("\n[bold white]{}[/bold white]:\n [bold grey]{}[bold white]{}[/bold white]\n {}[bold white]{}[/bold white]\n {}{}\n {}{}\n {}{}\n {}{}\n {}{}[/bold grey]".format( wallet, "coldkey:".ljust(15), wallet.coldkeypub.ss58_address, "hotkey:".ljust(15), wallet.hotkey.ss58_address, "registered:".ljust(15), registered, "balance:".ljust(15), cold_balance.__rich__(), "stake:".ljust(15), stake.__rich__(), "emission:".ljust(15), emission.__rich_rao__(), "latency:".ljust(15), latency ), highlight=True)
+
+
+ def run_miner ( self ):
+ self.config.to_defaults()
+ # Check coldkey.
+ wallet = bittensor.wallet( config = self.config )
+ if not wallet.coldkeypub_file.exists_on_device():
+ if Confirm.ask("Coldkey: [bold]'{}'[/bold] does not exist, do you want to create it".format(self.config.wallet.get('name', bittensor.defaults.wallet.name))):
+ wallet.create_new_coldkey()
+ else:
+ sys.exit()
+
+ # Check hotkey.
+ if not wallet.hotkey_file.exists_on_device():
+ if Confirm.ask("Hotkey: [bold]'{}'[/bold] does not exist, do you want to create it".format(self.config.wallet.hotkey)):
+ wallet.create_new_hotkey()
+ else:
+ sys.exit()
+
+ if wallet.hotkey_file.is_encrypted():
+ bittensor.__console__.print("Decrypting hotkey ... ")
+ wallet.hotkey
+
+ if wallet.coldkeypub_file.is_encrypted():
+ bittensor.__console__.print("Decrypting coldkeypub ... ")
+ wallet.coldkeypub
+
+ # Check registration
+ ## Will exit if --wallet.reregister is False
+ wallet.reregister()
+
+ # Run miner.
+ if self.config.model == 'core_server':
+
+ if self.config.synapse == 'TextLastHiddenState':
+ bittensor.neurons.core_server.neuron(lasthidden=True, causallm=False, seq2seq = False).run()
+ elif self.config.synapse == 'TextCausalLM':
+ bittensor.neurons.core_server.neuron(lasthidden=False, causallm=True, seq2seq = False).run()
+ elif self.config.synapse == 'TextSeq2Seq':
+ bittensor.neurons.core_server.neuron(lasthidden=False, causallm=False, seq2seq = True).run()
+ else:
+ bittensor.neurons.core_server.neuron().run()
+
+ elif self.config.model == 'core_validator':
+ bittensor.neurons.core_validator.neuron().run()
+ elif self.config.model == 'multitron_server':
+ bittensor.neurons.multitron_server.neuron().run()
+
+ def help ( self ):
+ self.config.to_defaults()
+
+ sys.argv = [sys.argv[0], '--help']
+
+ # Run miner.
+ if self.config.model == 'core_server':
+ bittensor.neurons.core_server.neuron().run()
+ elif self.config.model == 'core_validator':
+ bittensor.neurons.core_validator.neuron().run()
+ elif self.config.model == 'multitron_server':
+ bittensor.neurons.multitron_server.neuron().run()
+
+
+ def update ( self ):
+ if self.config.no_prompt or self.config.answer == 'Y':
+ os.system(' (cd ~/.bittensor/bittensor/ ; git checkout master ; git pull --ff-only )')
+ os.system('pip install -e ~/.bittensor/bittensor/')
+
+ def register( self ):
+ r""" Register neuron.
+ """
+ wallet = bittensor.wallet( config = self.config )
+ subtensor = bittensor.subtensor( config = self.config )
+ subtensor.register(
+ wallet = wallet,
+ prompt = not self.config.no_prompt,
+ TPB = self.config.subtensor.register.cuda.get('TPB', None),
+ update_interval = self.config.subtensor.register.get('update_interval', None),
+ num_processes = self.config.subtensor.register.get('num_processes', None),
+ cuda = self.config.subtensor.register.cuda.get('use_cuda', bittensor.defaults.subtensor.register.cuda.use_cuda),
+ dev_id = self.config.subtensor.register.cuda.get('dev_id', None),
+ output_in_place = self.config.subtensor.register.get('output_in_place', bittensor.defaults.subtensor.register.output_in_place),
+ log_verbose = self.config.subtensor.register.get('verbose', bittensor.defaults.subtensor.register.verbose),
+ )
+
+ def transfer( self ):
+ r""" Transfer token of amount to destination.
+ """
+ wallet = bittensor.wallet( config = self.config )
+ subtensor = bittensor.subtensor( config = self.config )
+ subtensor.transfer( wallet = wallet, dest = self.config.dest, amount = self.config.amount, wait_for_inclusion = True, prompt = not self.config.no_prompt )
+
+ def unstake( self ):
+ r""" Unstake token of amount from hotkey(s).
+ """
+ config = self.config.copy()
+ config.hotkey = None
+ wallet = bittensor.wallet( config = self.config )
+
+ subtensor: bittensor.subtensor = bittensor.subtensor( config = self.config )
+ wallets_to_unstake_from: List[bittensor.wallet]
+ if self.config.wallet.get('all_hotkeys'):
+ # Unstake from all hotkeys.
+ all_hotkeys: List[bittensor.wallet] = self._get_hotkey_wallets_for_wallet( wallet = wallet )
+ # Exclude hotkeys that are specified.
+ wallets_to_unstake_from = [
+ wallet for wallet in all_hotkeys if wallet.hotkey_str not in self.config.wallet.get('hotkeys', [])
+ ]
+
+ elif self.config.wallet.get('hotkeys'):
+ # Unstake from specific hotkeys.
+ wallets_to_unstake_from = [
+ bittensor.wallet( config = self.config, hotkey = hotkey ) for hotkey in self.config.wallet.get('hotkeys')
+ ]
+ else:
+ # Do regular unstake
+ subtensor.unstake( wallet, amount = None if self.config.get('unstake_all') else self.config.get('amount'), wait_for_inclusion = True, prompt = not self.config.no_prompt )
+ return None
+
+
+
+ final_wallets: List['bittensor.wallet'] = []
+ final_amounts: List[Union[float, Balance]] = []
+ for wallet in tqdm(wallets_to_unstake_from):
+ wallet: bittensor.wallet
+ if not wallet.is_registered():
+ # Skip unregistered hotkeys.
+ continue
+
+ unstake_amount_tao: float = self.config.get('amount')
+ if self.config.get('max_stake'):
+ wallet_stake: Balance = wallet.get_stake()
+ unstake_amount_tao: float = wallet_stake.tao - self.config.get('max_stake')
+ self.config.amount = unstake_amount_tao
+ if unstake_amount_tao < 0:
+ # Skip if max_stake is greater than current stake.
+ continue
+
+ final_wallets.append(wallet)
+ final_amounts.append(unstake_amount_tao)
+
+ # Ask to unstake
+ if not self.config.no_prompt:
+ if not Confirm.ask("Do you want to unstake from the following keys:\n" + \
+ "".join([
+ f" [bold white]- {wallet.hotkey_str}: {amount}𝜏[/bold white]\n" for wallet, amount in zip(final_wallets, final_amounts)
+ ])
+ ):
+ return None
+
+ subtensor.unstake_multiple( wallets = final_wallets, amounts = None if self.config.get('unstake_all') else final_amounts, wait_for_inclusion = True, prompt = False )
+
+
+ def stake( self ):
+ r""" Stake token of amount to hotkey(s).
+ """
+ config = self.config.copy()
+ config.hotkey = None
+ wallet = bittensor.wallet( config = config )
+
+ subtensor: bittensor.subtensor = bittensor.subtensor( config = self.config )
+ wallets_to_stake_to: List[bittensor.wallet]
+ if self.config.wallet.get('all_hotkeys'):
+ # Stake to all hotkeys.
+ all_hotkeys: List[bittensor.wallet] = self._get_hotkey_wallets_for_wallet( wallet = wallet )
+ # Exclude hotkeys that are specified.
+ wallets_to_stake_to = [
+ wallet for wallet in all_hotkeys if wallet.hotkey_str not in self.config.wallet.get('hotkeys', [])
+ ]
+
+ elif self.config.wallet.get('hotkeys'):
+ # Stake to specific hotkeys.
+ wallets_to_stake_to = [
+ bittensor.wallet( config = self.config, hotkey = hotkey ) for hotkey in self.config.wallet.get('hotkeys')
+ ]
+ else:
+ # Only self.config.wallet.hotkey is specified.
+ # so we stake to that single hotkey.
+ assert self.config.wallet.hotkey is not None
+ wallets_to_stake_to = [ bittensor.wallet( config = self.config ) ]
+
+ # Otherwise we stake to multiple wallets
+
+ wallet_0: 'bittensor.wallet' = wallets_to_stake_to[0]
+ # Decrypt coldkey for all wallet(s) to use
+ wallet_0.coldkey
+
+ # Get coldkey balance
+ wallet_balance: Balance = wallet_0.get_balance()
+ final_wallets: List['bittensor.wallet'] = []
+ final_amounts: List[Union[float, Balance]] = []
+ for wallet in tqdm(wallets_to_stake_to):
+ wallet: bittensor.wallet
+ if not wallet.is_registered():
+ # Skip unregistered hotkeys.
+ continue
+
+ # Assign decrypted coldkey from wallet_0
+ # so we don't have to decrypt again
+ wallet._coldkey = wallet_0._coldkey
+
+ stake_amount_tao: float = self.config.get('amount')
+ if self.config.get('max_stake'):
+ wallet_stake: Balance = wallet.get_stake()
+ stake_amount_tao: float = self.config.get('max_stake') - wallet_stake.tao
+
+ # If the max_stake is greater than the current wallet balance, stake the entire balance.
+ stake_amount_tao: float = min(stake_amount_tao, wallet_balance.tao)
+ if stake_amount_tao <= 0.00001: # Threshold because of fees, might create a loop otherwise
+ # Skip hotkey if max_stake is less than current stake.
+ continue
+ wallet_balance = Balance.from_tao(wallet_balance.tao - stake_amount_tao)
+ final_amounts.append(stake_amount_tao)
+ final_wallets.append(wallet)
+
+ if len(final_wallets) == 0:
+ # No wallets to stake to.
+ bittensor.__console__.print("Not enough balance to stake to any hotkeys or max_stake is less than current stake.")
+ return None
+
+ # Ask to stake
+ if not self.config.no_prompt:
+ if not Confirm.ask(f"Do you want to stake to the following keys from {wallet_0.name}:\n" + \
+ "".join([
+ f" [bold white]- {wallet.hotkey_str}: {amount}𝜏[/bold white]\n" for wallet, amount in zip(final_wallets, final_amounts)
+ ])
+ ):
+ return None
+
+ if len(final_wallets) == 1:
+ # do regular stake
+ return subtensor.add_stake( wallet=final_wallets[0], amount = None if self.config.get('stake_all') else final_amounts[0], wait_for_inclusion = True, prompt = not self.config.no_prompt )
+
+ subtensor.add_stake_multiple( wallets = final_wallets, amounts = None if self.config.get('stake_all') else final_amounts, wait_for_inclusion = True, prompt = False )
+
+
+ def set_weights( self ):
+ r""" Set weights and uids on chain.
+ """
+ wallet = bittensor.wallet( config = self.config )
+ subtensor = bittensor.subtensor( config = self.config )
+ subtensor.set_weights(
+ wallet,
+ uids = self.config.uids,
+ weights = self.config.weights,
+ wait_for_inclusion = True,
+ prompt = not self.config.no_prompt
+ )
+
+ @staticmethod
+ def _get_hotkey_wallets_for_wallet( wallet ) -> List['bittensor.wallet']:
+ hotkey_wallets = []
+ hotkeys_path = wallet.path + '/' + wallet.name + '/hotkeys'
+ try:
+ hotkey_files = next(os.walk(os.path.expanduser(hotkeys_path)))[2]
+ except StopIteration:
+ hotkey_files = []
+ for hotkey_file_name in hotkey_files:
+ try:
+ hotkey_for_name = bittensor.wallet( path = wallet.path, name = wallet.name, hotkey = hotkey_file_name )
+ if hotkey_for_name.hotkey_file.exists_on_device() and not hotkey_for_name.hotkey_file.is_encrypted():
+ hotkey_wallets.append( hotkey_for_name )
+ except Exception:
+ pass
+ return hotkey_wallets
+
+ @staticmethod
+ def _get_coldkey_wallets_for_path( path: str ) -> List['bittensor.wallet']:
+ try:
+ wallet_names = next(os.walk(os.path.expanduser(path)))[1]
+ return [ bittensor.wallet( path= path, name=name ) for name in wallet_names ]
+ except StopIteration:
+ # No wallet files found.
+ wallets = []
+ return wallets
+
+ @staticmethod
+ def _get_all_wallets_for_path( path:str ) -> List['bittensor.wallet']:
+ all_wallets = []
+ cold_wallets = CLI._get_coldkey_wallets_for_path(path)
+ for cold_wallet in cold_wallets:
+ if cold_wallet.coldkeypub_file.exists_on_device() and not cold_wallet.coldkeypub_file.is_encrypted():
+ all_wallets.extend( CLI._get_hotkey_wallets_for_wallet(cold_wallet) )
+ return all_wallets
+
+ def list(self):
+ r""" Lists wallets.
+ """
+ try:
+ wallets = next(os.walk(os.path.expanduser(self.config.wallet.path)))[1]
+ except StopIteration:
+ # No wallet files found.
+ wallets = []
+
+ root = Tree("Wallets")
+ for w_name in wallets:
+ wallet_for_name = bittensor.wallet( path = self.config.wallet.path, name = w_name)
+ try:
+ if wallet_for_name.coldkeypub_file.exists_on_device() and not wallet_for_name.coldkeypub_file.is_encrypted():
+ coldkeypub_str = wallet_for_name.coldkeypub.ss58_address
+ else:
+ coldkeypub_str = '?'
+ except:
+ coldkeypub_str = '?'
+
+ wallet_tree = root.add("\n[bold white]{} ({})".format(w_name, coldkeypub_str))
+ hotkeys_path = os.path.join(self.config.wallet.path, w_name, 'hotkeys')
+ try:
+ hotkeys = next(os.walk(os.path.expanduser(hotkeys_path)))
+ if len( hotkeys ) > 1:
+ for h_name in hotkeys[2]:
+ hotkey_for_name = bittensor.wallet( path = self.config.wallet.path, name = w_name, hotkey = h_name)
+ try:
+ if hotkey_for_name.hotkey_file.exists_on_device() and not hotkey_for_name.hotkey_file.is_encrypted():
+ hotkey_str = hotkey_for_name.hotkey.ss58_address
+ else:
+ hotkey_str = '?'
+ except:
+ hotkey_str = '?'
+ wallet_tree.add("[bold grey]{} ({})".format(h_name, hotkey_str))
+ except:
+ continue
+
+ if len(wallets) == 0:
+ root.add("[bold red]No wallets found.")
+ print(root)
+
+ def metagraph(self):
+ r""" Prints an entire metagraph.
+ """
+ console = bittensor.__console__
+ subtensor = bittensor.subtensor( config = self.config )
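+        # Passing the subtensor selects the nakamoto metagraph implementation in the metagraph factory.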
+ metagraph = bittensor.metagraph( network = 'nakamoto', subtensor = subtensor )
+ console.print(":satellite: Syncing with chain: [white]{}[/white] ...".format(self.config.subtensor.network))
+ metagraph.sync()
+ metagraph.save()
+ issuance = subtensor.total_issuance
+ difficulty = subtensor.difficulty
+
+ TABLE_DATA = []
+ total_stake = 0.0
+ total_rank = 0.0
+ total_trust = 0.0
+ total_consensus = 0.0
+ total_incentive = 0.0
+ total_dividends = 0.0
+ total_emission = 0
+ for uid in metagraph.uids:
+ ep = metagraph.endpoint_objs[uid]
+ row = [
+ str(ep.uid),
+ '{:.5f}'.format( metagraph.stake[uid]),
+ '{:.5f}'.format( metagraph.ranks[uid]),
+ '{:.5f}'.format( metagraph.trust[uid]),
+ '{:.5f}'.format( metagraph.consensus[uid]),
+ '{:.5f}'.format( metagraph.incentive[uid]),
+ '{:.5f}'.format( metagraph.dividends[uid]),
+ '{}'.format( int(metagraph.emission[uid] * 1000000000)),
+ str((metagraph.block.item() - metagraph.last_update[uid].item())),
+ str( metagraph.active[uid].item() ),
+ ep.ip + ':' + str(ep.port) if ep.is_serving else '[yellow]none[/yellow]',
+ ep.hotkey[:10],
+ ep.coldkey[:10]
+ ]
+ total_stake += metagraph.stake[uid]
+ total_rank += metagraph.ranks[uid]
+ total_trust += metagraph.trust[uid]
+ total_consensus += metagraph.consensus[uid]
+ total_incentive += metagraph.incentive[uid]
+ total_dividends += metagraph.dividends[uid]
+ total_emission += int(metagraph.emission[uid] * 1000000000)
+ TABLE_DATA.append(row)
+ total_neurons = len(metagraph.uids)
+ table = Table(show_footer=False)
+ table.title = (
+ "[white]Metagraph: name: {}, block: {}, N: {}/{}, tau: {}/block, stake: {}, issuance: {}, difficulty: {}".format(subtensor.network, metagraph.block.item(), sum(metagraph.active.tolist()), metagraph.n.item(), bittensor.Balance.from_tao(metagraph.tau.item()), bittensor.Balance.from_tao(total_stake), issuance, difficulty )
+ )
+ table.add_column("[overline white]UID", str(total_neurons), footer_style = "overline white", style='yellow')
+ table.add_column("[overline white]STAKE(\u03C4)", '\u03C4{:.5f}'.format(total_stake), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]RANK", '{:.5f}'.format(total_rank), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]TRUST", '{:.5f}'.format(total_trust), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]CONSENSUS", '{:.5f}'.format(total_consensus), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]INCENTIVE", '{:.5f}'.format(total_incentive), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]DIVIDENDS", '{:.5f}'.format(total_dividends), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]EMISSION(\u03C1)", '\u03C1{}'.format(int(total_emission)), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]UPDATED", justify='right', no_wrap=True)
+ table.add_column("[overline white]ACTIVE", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]AXON", justify='left', style='dim blue', no_wrap=True)
+ table.add_column("[overline white]HOTKEY", style='dim blue', no_wrap=False)
+ table.add_column("[overline white]COLDKEY", style='dim purple', no_wrap=False)
+ table.show_footer = True
+
+ for row in TABLE_DATA:
+ table.add_row(*row)
+ table.box = None
+ table.pad_edge = False
+ table.width = None
+ console.print(table)
+
+ def weights(self):
+        r""" Prints weights to screen.
+ """
+ console = bittensor.__console__
+ subtensor = bittensor.subtensor( config = self.config )
+ metagraph = bittensor.metagraph( subtensor = subtensor ).sync()
+ wallet = bittensor.wallet( config = self.config )
+
+ table = Table()
+ rows = []
+ table.add_column("[bold white]uid", style='white', no_wrap=False)
+ for uid in metagraph.uids.tolist():
+ table.add_column("[bold white]{}".format(uid), style='white', no_wrap=False)
+ if self.config.all_weights:
+ rows.append(["[bold white]{}".format(uid) ] + ['{:.3f}'.format(v) for v in metagraph.W[uid].tolist()])
+ else:
+ if metagraph.coldkeys[uid] == wallet.coldkeypub.ss58_address:
+ if not self.config.all_hotkeys:
+ if metagraph.hotkeys[uid] == wallet.hotkey.ss58_address:
+ rows.append(["[bold white]{}".format(uid) ] + ['{:.3f}'.format(v) for v in metagraph.W[uid].tolist()])
+ else:
+ rows.append(["[bold white]{}".format(uid) ] + ['{:.3f}'.format(v) for v in metagraph.W[uid].tolist()])
+
+ for row in rows:
+ table.add_row(*row)
+ table.box = None
+ table.pad_edge = False
+ table.width = None
+ with console.pager():
+ console.print(table)
+
+ def overview(self):
+        r""" Prints an overview for the wallet's coldkey.
+ """
+ console = bittensor.__console__
+ subtensor = bittensor.subtensor( config = self.config )
+
+ all_hotkeys = []
+ total_balance = bittensor.Balance(0)
+
+ # We are printing for every coldkey.
+ if self.config.all:
+ cold_wallets = CLI._get_coldkey_wallets_for_path(self.config.wallet.path)
+ for cold_wallet in tqdm(cold_wallets, desc="Pulling balances"):
+ if cold_wallet.coldkeypub_file.exists_on_device() and not cold_wallet.coldkeypub_file.is_encrypted():
+ total_balance = total_balance + subtensor.get_balance( cold_wallet.coldkeypub.ss58_address )
+ all_hotkeys = CLI._get_all_wallets_for_path( self.config.wallet.path )
+ else:
+ # We are only printing keys for a single coldkey
+ coldkey_wallet = bittensor.wallet( config = self.config )
+ if coldkey_wallet.coldkeypub_file.exists_on_device() and not coldkey_wallet.coldkeypub_file.is_encrypted():
+ total_balance = subtensor.get_balance( coldkey_wallet.coldkeypub.ss58_address )
+ if not coldkey_wallet.coldkeypub_file.exists_on_device():
+ console.print("[bold red]No wallets found.")
+ return
+ all_hotkeys = CLI._get_hotkey_wallets_for_wallet( coldkey_wallet )
+
+ # We are printing for a select number of hotkeys from all_hotkeys.
+
+ if self.config.wallet.get('hotkeys', []):
+ if not self.config.get('all_hotkeys', False):
+ # We are only showing hotkeys that are specified.
+ all_hotkeys = [hotkey for hotkey in all_hotkeys if hotkey.hotkey_str in self.config.wallet.hotkeys]
+ else:
+ # We are excluding the specified hotkeys from all_hotkeys.
+ all_hotkeys = [hotkey for hotkey in all_hotkeys if hotkey.hotkey_str not in self.config.wallet.hotkeys]
+
+ # Check we have keys to display.
+ if len(all_hotkeys) == 0:
+ console.print("[red]No wallets found.[/red]")
+ return
+
+ # Pull neuron info for all keys.
+ neurons = []
+ block = subtensor.block
+ with console.status(":satellite: Syncing with chain: [white]{}[/white] ...".format(self.config.subtensor.get('network', bittensor.defaults.subtensor.network))):
+ try:
+ if self.config.subtensor.get('network', bittensor.defaults.subtensor.network) not in ('local', 'nakamoto'):
+ # We only cache neurons for local/nakamoto.
+ raise CacheException("This network is not cached, defaulting to regular overview.")
+
+ if self.config.get('no_cache'):
+ raise CacheException("Flag was set to not use cache, defaulting to regular overview.")
+
+ metagraph: bittensor.Metagraph = bittensor.metagraph( subtensor = subtensor )
+ try:
+ # Grab cached neurons from IPFS
+ all_neurons = metagraph.retrieve_cached_neurons()
+ except Exception:
+ raise CacheException("Failed to retrieve cached neurons, defaulting to regular overview.")
+ # Map the hotkeys to uids
+ hotkey_to_neurons = {n.hotkey: n.uid for n in all_neurons}
+ for wallet in tqdm(all_hotkeys):
+ uid = hotkey_to_neurons.get(wallet.hotkey.ss58_address)
+ if uid is not None:
+ nn = all_neurons[uid]
+ neurons.append( (nn, wallet) )
+ except CacheException:
+ for wallet in tqdm(all_hotkeys):
+ # Get overview without cache
+ nn = subtensor.neuron_for_pubkey( wallet.hotkey.ss58_address )
+ if not nn.is_null:
+ neurons.append( (nn, wallet) )
+
+
+ TABLE_DATA = []
+ total_stake = 0.0
+ total_rank = 0.0
+ total_trust = 0.0
+ total_consensus = 0.0
+ total_incentive = 0.0
+ total_dividends = 0.0
+ total_emission = 0
+
+ for nn, hotwallet in tqdm(neurons):
+ uid = nn.uid
+ active = nn.active
+ stake = nn.stake
+ rank = nn.rank
+ trust = nn.trust
+ consensus = nn.consensus
+ incentive = nn.incentive
+ dividends = nn.dividends
+ emission = int(nn.emission * 1000000000)
+ last_update = int(block - nn.last_update)
+ row = [
+ hotwallet.name,
+ hotwallet.hotkey_str,
+ str(uid),
+ str(active),
+ '{:.5f}'.format(stake),
+ '{:.5f}'.format(rank),
+ '{:.5f}'.format(trust),
+ '{:.5f}'.format(consensus),
+ '{:.5f}'.format(incentive),
+ '{:.5f}'.format(dividends),
+ '{}'.format(emission),
+ str(last_update),
+ bittensor.utils.networking.int_to_ip( nn.ip) + ':' + str(nn.port) if nn.port != 0 else '[yellow]none[/yellow]',
+ nn.hotkey
+ ]
+ total_stake += stake
+ total_rank += rank
+ total_trust += trust
+ total_consensus += consensus
+ total_incentive += incentive
+ total_dividends += dividends
+ total_emission += emission
+ TABLE_DATA.append(row)
+
+ total_neurons = len(neurons)
+ table = Table(show_footer=False, width=self.config.get('width', None), pad_edge=False, box=None)
+ if not self.config.all:
+ table.title = ( "[white]Wallet - {}:{}".format(self.config.wallet.name, wallet.coldkeypub.ss58_address) )
+ else:
+ table.title = ( "[white]All Wallets:" )
+ table.add_column("[overline white]COLDKEY", str(total_neurons), footer_style = "overline white", style='bold white')
+ table.add_column("[overline white]HOTKEY", str(total_neurons), footer_style = "overline white", style='white')
+ table.add_column("[overline white]UID", str(total_neurons), footer_style = "overline white", style='yellow')
+ table.add_column("[overline white]ACTIVE", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]STAKE(\u03C4)", '\u03C4{:.5f}'.format(total_stake), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]RANK", '{:.5f}'.format(total_rank), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]TRUST", '{:.5f}'.format(total_trust), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]CONSENSUS", '{:.5f}'.format(total_consensus), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]INCENTIVE", '{:.5f}'.format(total_incentive), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]DIVIDENDS", '{:.5f}'.format(total_dividends), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]EMISSION(\u03C1)", '\u03C1{}'.format(int(total_emission)), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]UPDATED", justify='right', no_wrap=True)
+ table.add_column("[overline white]AXON", justify='left', style='dim blue', no_wrap=True)
+ table.add_column("[overline white]HOTKEY_SS58", style='dim blue', no_wrap=False)
+ table.show_footer = True
+ table.caption = "[white]Wallet balance: [green]\u03C4" + str(total_balance.tao)
+
+ console.clear()
+
+ sort_by: Optional[str] = self.config.get('sort_by', None)
+ sort_order: Optional[str] = self.config.get('sort_order', None)
+
+ if sort_by is not None and sort_by != "":
+ column_to_sort_by: int = 0
+ highest_matching_ratio: int = 0
+ sort_descending: bool = False # Default sort_order to ascending
+
+ for index, column in zip(range(len(table.columns)), table.columns):
+ # Fuzzy match the column name. Default to the first column.
+ column_name = column.header.lower().replace('[overline white]', '')
+ match_ratio = fuzz.ratio(sort_by.lower(), column_name)
+ # Finds the best matching column
+ if match_ratio > highest_matching_ratio:
+ highest_matching_ratio = match_ratio
+ column_to_sort_by = index
+
+            if sort_order is not None and sort_order.lower() in { 'desc', 'descending', 'reverse'}:
+ # Sort descending if the sort_order matches desc, descending, or reverse
+ sort_descending = True
+
+ def overview_sort_function(row):
+ data = row[column_to_sort_by]
+ # Try to convert to number if possible
+ try:
+ data = float(data)
+ except ValueError:
+ pass
+ return data
+
+ TABLE_DATA.sort(key=overview_sort_function, reverse=sort_descending)
+
+ for row in TABLE_DATA:
+ table.add_row(*row)
+
+ console.print(table, width=self.config.get('width', None))
+
+ def full(self):
+        r""" Prints an overview for the wallet's coldkey.
+ """
+        console = bittensor.__console__
+        all_wallets = CLI._get_all_wallets_for_path( self.config.wallet.path )
+        if len(all_wallets) == 0:
+            console.print("[red]No wallets found.[/red]")
+            return
+
+        subtensor = bittensor.subtensor( config = self.config )
+ meta: bittensor.Metagraph = bittensor.metagraph( subtensor = subtensor )
+ # Get metagraph, use no_cache if flagged
+ meta.sync(cached = not self.config.get('no_cache', False))
+ neurons = []
+ block = subtensor.block
+
+ with console.status(":satellite: Syncing with chain: [white]{}[/white] ...".format(self.config.subtensor.network)):
+ balance = bittensor.Balance(0.0)
+
+ all_neurons = meta.neurons
+ # Map the hotkeys to uids
+ hotkey_to_neurons = {n.hotkey: n.uid for n in all_neurons}
+ for next_wallet in tqdm(all_wallets, desc="[white]Getting wallet balances"):
+ if len(next_wallet) == 0:
+ # Skip wallets with no hotkeys
+ continue
+
+ for hotkey_wallet in next_wallet:
+ uid = hotkey_to_neurons.get(hotkey_wallet.hotkey.ss58_address)
+ if uid is not None:
+ nn = all_neurons[uid]
+ neurons.append( (nn, hotkey_wallet) )
+
+ balance += subtensor.get_balance( next_wallet[0].coldkeypub.ss58_address )
+
+ TABLE_DATA = []
+ total_stake = 0.0
+ total_rank = 0.0
+ total_trust = 0.0
+ total_consensus = 0.0
+ total_incentive = 0.0
+ total_dividends = 0.0
+ total_emission = 0
+ for nn, hotwallet in tqdm(neurons):
+ uid = nn.uid
+ active = nn.active
+ stake = nn.stake
+ rank = nn.rank
+ trust = nn.trust
+ consensus = nn.consensus
+ incentive = nn.incentive
+ dividends = nn.dividends
+ emission = int(nn.emission * 1000000000)
+ last_update = int(block - nn.last_update)
+ row = [
+ hotwallet.hotkey_str,
+ str(uid),
+ str(active),
+ '{:.5f}'.format(stake),
+ '{:.5f}'.format(rank),
+ '{:.5f}'.format(trust),
+ '{:.5f}'.format(consensus),
+ '{:.5f}'.format(incentive),
+ '{:.5f}'.format(dividends),
+ '{}'.format(emission),
+ str(last_update),
+ bittensor.utils.networking.int_to_ip( nn.ip) + ':' + str(nn.port) if nn.port != 0 else '[yellow]none[/yellow]',
+ nn.hotkey
+ ]
+ total_stake += stake
+ total_rank += rank
+ total_trust += trust
+ total_consensus += consensus
+ total_incentive += incentive
+ total_dividends += dividends
+ total_emission += emission
+ TABLE_DATA.append(row)
+
+ total_neurons = len(neurons)
+ table = Table(show_footer=False)
+ table.title = (
+ "[white]Wallet - {}:{}".format(self.config.wallet.name, hotwallet.coldkeypub.ss58_address)
+ )
+ table.add_column("[overline white]HOTKEY NAME", str(total_neurons), footer_style = "overline white", style='bold white')
+ table.add_column("[overline white]UID", str(total_neurons), footer_style = "overline white", style='yellow')
+ table.add_column("[overline white]ACTIVE", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]STAKE(\u03C4)", '\u03C4{:.5f}'.format(total_stake), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]RANK", '{:.5f}'.format(total_rank), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]TRUST", '{:.5f}'.format(total_trust), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]CONSENSUS", '{:.5f}'.format(total_consensus), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]INCENTIVE", '{:.5f}'.format(total_incentive), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]DIVIDENDS", '{:.5f}'.format(total_dividends), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]EMISSION(\u03C1)", '\u03C1{}'.format(int(total_emission)), footer_style = "overline white", justify='right', style='green', no_wrap=True)
+ table.add_column("[overline white]UPDATED", justify='right', no_wrap=True)
+ table.add_column("[overline white]AXON", justify='left', style='dim blue', no_wrap=True)
+ table.add_column("[overline white]HOTKEY", style='dim blue', no_wrap=False)
+ table.show_footer = True
+ table.caption = "[white]Wallet balance: [green]\u03C4" + str(balance.tao)
+
+ console.clear()
+ for row in TABLE_DATA:
+ table.add_row(*row)
+ table.box = None
+ table.pad_edge = False
+ table.width = None
+ console.print(table)
+
+class CacheException(Exception):
+ """
+ Exception raised when the cache has an issue or should not be used.
+ """
\ No newline at end of file
diff --git a/bittensor/_endpoint/__init__.py b/bittensor/_endpoint/__init__.py
index e3d66d44c5..8de7747455 100644
--- a/bittensor/_endpoint/__init__.py
+++ b/bittensor/_endpoint/__init__.py
@@ -45,8 +45,8 @@ def __new__(
ip:str,
ip_type:int,
port:int,
- protocol:int,
coldkey:str,
+ protocol:int = 0, # TODO: activate protocol
modality: int = 0 # TODO: remove modality
) -> 'bittensor.Endpoint':
endpoint.assert_format(
@@ -79,16 +79,29 @@ def from_neuron( neuron: 'bittensor.NeuronInfo' ) -> 'bittensor.Endpoint':
if neuron.is_null:
raise ValueError('Cannot create endpoint from null neuron')
- return endpoint_impl.Endpoint(
- version = neuron.axon_info.version,
- uid = neuron.uid,
- hotkey = neuron.hotkey,
- port = neuron.axon_info.port,
- ip = neuron.axon_info.ip,
- ip_type = neuron.axon_info.ip_type,
- protocol = neuron.axon_info.protocol,
- coldkey = neuron.coldkey
- )
+ if hasattr(neuron, 'axon_info'): #if config.subtensor.network == 'finney'
+ return endpoint_impl.Endpoint(
+ version = neuron.axon_info.version,
+ uid = neuron.uid,
+ hotkey = neuron.hotkey,
+ port = neuron.axon_info.port,
+ ip = neuron.axon_info.ip,
+ ip_type = neuron.axon_info.ip_type,
+ protocol = neuron.axon_info.protocol,
+ coldkey = neuron.coldkey
+ )
+ else:
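+            # Nakamoto neurons expose the axon fields (ip, port, version, modality) directly on the neuron rather than via axon_info.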
+ return endpoint_impl.Endpoint(
+ version = neuron.version,
+ uid = neuron.uid,
+ hotkey = neuron.hotkey,
+ port = neuron.port,
+ ip = neuron.ip,
+ ip_type = neuron.ip_type,
+ modality = neuron.modality,
+ coldkey = neuron.coldkey,
+ protocol = None
+ )
@staticmethod
def from_dict(endpoint_dict: dict) -> 'bittensor.Endpoint':
diff --git a/bittensor/_metagraph/__init__.py b/bittensor/_metagraph/__init__.py
index 3d3cf65f29..b19e90fce7 100644
--- a/bittensor/_metagraph/__init__.py
+++ b/bittensor/_metagraph/__init__.py
@@ -26,6 +26,7 @@
from . import metagraph_mock
from typing import Optional, List
import bittensor.utils.weight_utils as weight_utils
+from .naka_metagraph_impl import Metagraph as naka_metagraph
class metagraph:
""" Factory class for the bittensor.Metagraph class or the MockMetagraph
@@ -43,6 +44,7 @@ def __new__(
config: 'bittensor.config' = None,
network: str = None,
netuid: Optional[int] = None,
+ subtensor: 'bittensor.Subtensor' = None,
_mock:bool=None
) -> 'bittensor.Metagraph':
r""" Creates a new bittensor.Metagraph object from passed arguments.
@@ -68,12 +70,17 @@ def __new__(
config.metagraph._mock = _mock if _mock != None else config.metagraph._mock
if config.metagraph._mock:
return metagraph_mock.MockMetagraph()
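+        # A subtensor passed explicitly determines the network; otherwise fall back to the config value below.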
+ if subtensor != None:
+ network = subtensor.network
if netuid == None:
netuid = config.get('netuid', None)
if network == None:
- network = config.get('subtensor.network', None)
-
- return metagraph_impl.Metagraph( network = network, netuid = netuid )
+ network = config.subtensor.get('network', bittensor.defaults.subtensor.network)
+
+    if network == 'finney':
+        return metagraph_impl.Metagraph( network = network, netuid = netuid )
+    elif network == 'nakamoto':
+        return naka_metagraph(config = config, subtensor = subtensor)
@classmethod
def config(cls) -> 'bittensor.Config':
diff --git a/bittensor/_metagraph/naka_metagraph_impl.py b/bittensor/_metagraph/naka_metagraph_impl.py
new file mode 100644
index 0000000000..0841ab01f1
--- /dev/null
+++ b/bittensor/_metagraph/naka_metagraph_impl.py
@@ -0,0 +1,601 @@
+""" Maintains chain state as a torch.nn.Module.
+"""
+# The MIT License (MIT)
+# Copyright © 2021 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import os
+
+from typing import List
+from loguru import logger
+
+import ast
+import pandas
+import torch.nn.functional as f
+import torch
+import pickle
+import json
+
+import bittensor
+import bittensor.utils.networking as net
+import bittensor.utils.weight_utils as weight_utils
+
+RAOPERTAO = 1000000000
+U64MAX = 18446744073709551615
+
+class Metagraph( torch.nn.Module ):
+ r""" Maintains chain state as a torch.nn.Module.
+ Interface:
+ tau (:obj:`torch.FloatTensor` of shape :obj:`(1)`):
+ Current, per block, token emission rate.
+ block (:obj:`torch.LongTensor` of shape :obj:`(1)`):
+ State block number.
+ uids (:obj:`torch.LongTensor` of shape :obj:`(metagraph.n)`):
+ UIDs for each neuron.
+
+ stake (:obj:`torch.LongTensor` of shape :obj:`(metagraph.n)`):
+ Stake balance for each neuron ordered by uid.
+
+ last_update (:obj:`torch.LongTensor` of shape :obj:`(metagraph.n)`):
+ Last emission call for each neuron ordered by uid.
+ weights (:obj:`torch.FloatTensor` of shape :obj:`(metagraph.n, metagraph.n)`):
+ Full weight matrix on chain ordered by uid.
+ neurons (:obj:`torch.LongTensor` of shape :obj:`(metagraph.n, -1)`)
+ Tokenized endpoint information.
+ """
+ def __init__( self, subtensor, config ):
+ r""" Initializes a new Metagraph torch chain interface object.
+ """
+ super(Metagraph, self).__init__()
+ self.config = config
+ if subtensor == None:
+ subtensor = bittensor.subtensor( config = config)
+ self.subtensor = subtensor
+ self.clear()
+
+ def clear( self ) -> 'Metagraph':
+ r""" Erases Metagraph state.
+ """
+ self.version = torch.nn.Parameter( torch.tensor( [ bittensor.__version_as_int__ ], dtype=torch.int64), requires_grad=False )
+ self.n = torch.nn.Parameter( torch.tensor( [0], dtype=torch.int64), requires_grad = False )
+ self.tau = torch.nn.Parameter( torch.tensor( [1], dtype=torch.float32), requires_grad = False )
+ self.block = torch.nn.Parameter( torch.tensor( [0], dtype=torch.int64), requires_grad = False )
+ self.stake = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.ranks = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.trust = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.consensus = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.incentive = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.emission = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.dividends = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.active = torch.nn.Parameter( torch.tensor( [], dtype=torch.int64), requires_grad=False )
+ self.last_update = torch.nn.Parameter( torch.tensor( [], dtype=torch.int64), requires_grad=False )
+ self.weights = torch.nn.Parameter( torch.tensor( [], dtype=torch.float32), requires_grad=False )
+ self.bonds = torch.nn.Parameter( torch.tensor( [], dtype=torch.int64), requires_grad=False )
+ self.endpoints = torch.nn.Parameter( torch.tensor( [], dtype=torch.int64), requires_grad=False )
+ self.uids = torch.nn.Parameter( torch.tensor([], dtype = torch.int64),requires_grad=False )
+ self._endpoint_objs = None
+ self.neurons = None
+ return self
+
+ def forward (
+ self,
+ uid: int,
+ row_weight: torch.FloatTensor
+ ) -> torch.FloatTensor:
+ """
+        Returns a dividend vector for a change in weights by computing the full incentive function.
+ Args:
+ uid (int):
+ uid to set weights.
+                row_weight (torch.FloatTensor, shape = (n)):
+ normalized row to replace at uid.
+ Returns:
+ dividends (torch.FloatTensor):
+ Dividends for the entire network.
+ """
+
+ # Return if there are no neurons.
+ if self.n.item() == 0:
+ return torch.tensor([], dtype=torch.float32)
+
+ # Raise if the passed weights are badly shaped.
+ if torch.numel( row_weight ) != self.n.item():
+            raise ValueError('Passed weight update must have the dimension of a row in W. Got {}, expected {}'.format(row_weight.size(), self.n.item()))
+
+ # Reshape to fit weights.
+ row_weight = row_weight.view( self.n )
+
+ # Normalize row.
+ if torch.abs( torch.sum( row_weight ) - 1 ) > 0.0001:
+ row_weight = f.normalize(row_weight, p=1, dim=0)
+
+        # Raise if the passed uid is out of range.
+ if uid >= self.n.item():
+            raise ValueError('Passed uid does not exist in the graph. Got {} > {}'.format(uid, self.n.item()))
+
+ weight = self.W.detach().clone()
+ weight[uid,:] = row_weight
+
+ # Compute ranks.
+ S = self.S.view(self.n, 1)
+ Wt = torch.transpose(weight, 0, 1)
+ R = torch.matmul(Wt, S).view(self.n)
+
+ # Compute trust.
+ T = torch.matmul((Wt != 0).float(), S).view(self.n)
+
+ # Compute consensus.
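+        # rho sets the steepness of the consensus sigmoid and kappa the stake-majority threshold (0.5 = simple majority).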
+ rho = 10
+ kappa = 0.5
+ # Return if there is no stake.
+ if torch.sum( self.S ) == 0:
+ C = torch.sigmoid( rho * (T - kappa) ).view(self.n)
+ else:
+ C = torch.sigmoid( rho * (T / torch.sum(S) - kappa) ).view(self.n)
+
+ # Compute incentive.
+ Incentive = (R * C).view(self.n)
+ print (Incentive)
+
+ # Compute emission.
+ if torch.sum(Incentive) == 0:
+ Inflation = torch.zeros( (self.n.item()), dtype=torch.float32 ).view(self.n)
+ else:
+ Inflation = (self.tau * Incentive).view(self.n)
+
+ # Compute bonds.
+ B = self.B.detach().clone().float()
+ B_norm = f.normalize(B, p=1, dim=1)
+
+ # Dividends
+ D = torch.matmul( B_norm.view(self.n, self.n), Inflation.view(self.n, 1) ).view(self.n) + 0.5 * Inflation.view(self.n)
+
+ # Return dividends.
+ return D.view(self.n)
+
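+    # Illustrative usage sketch (not part of this change; names below are hypothetical):
+    # with a synced metagraph `meta`, forward() previews dividends for a candidate
+    # weight row before it is committed on chain, e.g.
+    #
+    #   candidate = torch.zeros( meta.n.item() )
+    #   candidate[ 0 ] = 1.0
+    #   dividends = meta.forward( uid = 10, row_weight = candidate )
+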
+ @property
+ def S(self) -> torch.FloatTensor:
+ """ Stake
+ """
+ return self.stake
+
+ @property
+ def R(self) -> torch.FloatTensor:
+ """ Rank
+ """
+ return self.ranks
+
+ @property
+ def I(self) -> torch.FloatTensor:
+ """ Incentive
+ """
+ return self.incentive
+
+ @property
+ def E(self) -> torch.FloatTensor:
+ """ Emission
+ """
+ return self.emission
+
+ @property
+ def C(self) -> torch.FloatTensor:
+ """ Consensus
+ """
+ return self.consensus
+
+ @property
+ def T(self) -> torch.FloatTensor:
+ """ Trust
+ """
+ return self.trust
+
+ @property
+ def D(self) -> torch.FloatTensor:
+ """ Dividends
+ """
+ return self.dividends
+
+ @property
+ def B(self) -> torch.FloatTensor:
+ """ Bonds
+ """
+ return self.bonds
+
+ @property
+ def W(self) -> torch.FloatTensor:
+ """ Weights
+ """
+ return self.weights
+
+ @property
+ def hotkeys( self ) -> List[str]:
+ r""" Returns hotkeys for each neuron.
+ Returns:
+ hotkeys (:obj:`List[str] of shape :obj:`(metagraph.n)`):
+ Neuron hotkeys.
+ """
+ if self.n.item() == 0:
+ return []
+ return [ neuron.hotkey if neuron != bittensor.endpoint.dummy() else '' for neuron in self.endpoint_objs ]
+
+ @property
+ def coldkeys( self ) -> List[str]:
+ r""" Returns coldkeys for each neuron.
+ Returns:
+ coldkeys (:obj:`List[str] of shape :obj:`(metagraph.n)`):
+ Neuron coldkeys.
+ """
+ if self.n.item() == 0:
+ return []
+ return [ neuron.coldkey if neuron != bittensor.endpoint.dummy() else '' for neuron in self.endpoint_objs ]
+
+ @property
+ def modalities( self ) -> List[str]:
+ r""" Returns the modality for each neuron.
+ Returns:
+                modalities (:obj:`List[str] of shape :obj:`(metagraph.n)`):
+                    Neuron modalities.
+ """
+ if self.n.item() == 0:
+ return []
+ return [ neuron.modality if neuron != bittensor.endpoint.dummy() else '' for neuron in self.endpoint_objs ]
+
+ @property
+ def addresses( self ) -> List[str]:
+ r""" Returns ip addresses for each neuron.
+ Returns:
+                addresses (:obj:`List[str] of shape :obj:`(metagraph.n)`):
+                    Neuron ip addresses.
+ """
+ if self.n.item() == 0:
+ return []
+ return [ net.ip__str__( neuron.ip_type, neuron.ip, neuron.port ) if neuron != bittensor.endpoint.dummy() else '' for neuron in self.endpoint_objs ]
+
+ @property
+ def endpoint_objs( self ) -> List['bittensor.Endpoint']:
+ r""" Returns endpoints as objects.
+ Returns:
+ endpoint_obj (:obj:`List[bittensor.Endpoint] of shape :obj:`(metagraph.n)`):
+ Endpoints as objects.
+ """
+ if self.n.item() == 0:
+ return []
+ elif self._endpoint_objs != None:
+ return self._endpoint_objs
+ else:
+ self._endpoint_objs = []
+ for tensor in self.endpoints:
+ obj = bittensor.endpoint.from_tensor( tensor )
+ self._endpoint_objs.append( obj )
+ return self._endpoint_objs
+
+ def hotkey_to_uid( self, hotkey:str ) -> int:
+ r""" Fetch uid according to hotkey.
+ Args:
+ hotkey: (`str`, required):
+ Hotkey to fetch the uid for.
+
+ Return:
+ uid: (`int`):
+ The uid for specified hotkey, -1 if hotkey does not exist.
+ """
+ if hotkey in self.hotkeys:
+ return self.hotkeys.index(hotkey)
+ else:
+ return -1
+
+ def load( self, network:str = None ) -> 'Metagraph':
+ r""" Loads this metagraph object's state_dict from bittensor root dir.
+ Args:
+                network: (:obj:`str`, optional):
+                    Name of state_dict to load, defaults to the current subtensor network.
+ """
+ try:
+ if network == None:
+ network = self.subtensor.network
+ metagraph_path = '~/.bittensor/' + str(network) + '.pt'
+ metagraph_path = os.path.expanduser(metagraph_path)
+ if os.path.isfile(metagraph_path):
+ self.load_from_path( path = metagraph_path )
+ else:
+ logger.warning('Did not load metagraph from path: {}, file does not exist. Run metagraph.save() first.', metagraph_path)
+ except Exception as e:
+ logger.exception(e)
+ return self
+
+ def save( self, network:str = None ) -> 'Metagraph':
+ r""" Saves this metagraph object's state_dict under bittensor root dir.
+ Args:
+                network: (:obj:`str`, optional):
+                    Name of state_dict, defaults to the current subtensor network.
+ """
+ if network == None:
+ network = self.subtensor.network
+ return self.save_to_path( path = '~/.bittensor/', filename = str(network) + '.pt')
+
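+    # Illustrative save/load round trip (hedged sketch; `meta` is a hypothetical,
+    # already-synced metagraph instance):
+    #
+    #   meta.save()                # writes ~/.bittensor/<network>.pt
+    #   meta.load()                # restores the state_dict saved above
+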
+ def load_from_path(self, path:str ) -> 'Metagraph':
+ r""" Loads this metagraph object with state_dict under the specified path.
+ Args:
+ path: (:obj:`str`, required):
+ Path to load state_dict.
+ """
+ full_path = os.path.expanduser(path)
+ metastate = torch.load( full_path )
+ return self.load_from_state_dict( metastate )
+
+ def save_to_path(self, path:str, filename:str ) -> 'Metagraph':
+ r""" Saves this metagraph object's state_dict to the specified path.
+ Args:
+ path: (:obj:`str`, required):
+                    Directory to save the state_dict under.
+                filename: (:obj:`str`, required):
+                    Filename of the saved state_dict.
+ """
+ full_path = os.path.expanduser(path)
+ os.makedirs(full_path, exist_ok=True)
+ metastate = self.state_dict()
+ torch.save(metastate, full_path + '/' + filename)
+ return self
+
+ def load_from_state_dict(self, state_dict:dict ) -> 'Metagraph':
+ r""" Loads this metagraph object from passed state_dict.
+ Args:
+ state_dict: (:obj:`dict`, required):
+ Metagraph state_dict. Must be same as that created by save_to_path.
+ """
+ self.version = torch.nn.Parameter( state_dict['version'], requires_grad=False )
+ self.n = torch.nn.Parameter( state_dict['n'], requires_grad=False )
+ self.tau = torch.nn.Parameter( state_dict['tau'], requires_grad=False )
+ self.block = torch.nn.Parameter( state_dict['block'], requires_grad=False )
+ self.uids = torch.nn.Parameter( state_dict['uids'], requires_grad=False )
+ self.stake = torch.nn.Parameter( state_dict['stake'], requires_grad=False )
+ self.ranks = torch.nn.Parameter( state_dict['ranks'], requires_grad=False )
+ self.trust = torch.nn.Parameter( state_dict['trust'], requires_grad=False )
+ self.consensus = torch.nn.Parameter( state_dict['consensus'], requires_grad=False )
+ self.incentive = torch.nn.Parameter( state_dict['incentive'], requires_grad=False )
+ self.emission = torch.nn.Parameter( state_dict['emission'], requires_grad=False )
+ self.dividends = torch.nn.Parameter( state_dict['dividends'], requires_grad=False )
+ self.active = torch.nn.Parameter( state_dict['active'], requires_grad=False )
+ self.last_update = torch.nn.Parameter( state_dict['last_update'], requires_grad=False )
+ self.weights = torch.nn.Parameter( state_dict['weights'], requires_grad=False )
+ self.bonds = torch.nn.Parameter( state_dict['bonds'], requires_grad=False )
+ self.endpoints = torch.nn.Parameter( state_dict['endpoints'], requires_grad=False )
+ self._endpoint_objs = None
+ return self
+
+ def retrieve_cached_neurons( self, block: int = None ):
+ """
+ Retrieves cached metagraph syncs from IPFS.
+ """
+ ipfs = bittensor.Ipfs()
+ ipns_hash = ipfs.latest_neurons_ipns
+ ipfs_hash = ipfs.cat
+
+ if block != None:
+ ipns_hash = ipfs.historical_neurons_ipns
+ ipfs_hash = ipfs.node_get
+
+ try:
+ # Ping IPNS for latest IPFS hash
+ ipns_resolve = ipfs.retrieve_directory(ipfs.ipns_resolve, (('arg', ipns_hash),))
+
+ # Extract IPFS hash from IPNS response
+ ipfs_path = ast.literal_eval(ipns_resolve.text)
+ except Exception as e:
+ logger.error("Error detected in metagraph sync: {} with sample text {}".format(e,ipns_resolve.text))
+
+ # Try Again
+ # Ping IPNS for latest IPFS hash
+ ipns_resolve = ipfs.retrieve_directory(ipfs.ipns_resolve, (('arg', ipns_hash),))
+
+ # Extract IPFS hash from IPNS response
+ ipfs_path = ast.literal_eval(ipns_resolve.text)
+
+ ipfs_resolved_hash = ipfs_path['Path'].split("ipfs/")[1]
+ ipfs_response = ipfs.retrieve_directory(ipfs_hash, (('arg', ipfs_resolved_hash),))
+
+ # Extract all neuron sync hashes
+ if block != None:
+ historical_neurons = json.loads(ipfs_response.content)['Links']
+ # Find the one that corresponds to our block
+ sync_data = next(item for item in historical_neurons if item["Name"] == "nakamoto-{}.pkl".format(block))
+ # Retrieve Neuron contents
+ ipfs_response = ipfs.retrieve_directory(ipfs.cat, (('arg', sync_data['Hash']),))
+
+ # Unpickle the response
+ neurons = pickle.loads(ipfs_response.content)
+
+ return neurons
+
+ def sync ( self, block: int = None, cached: bool = True, netuid: int = 1 ) -> 'Metagraph':
+ r""" Synchronizes this metagraph with the chain state.
+ """
+ if block == None:
+ block = self.subtensor.get_current_block()
+ if cached and self.subtensor.network in ("nakamoto", "local"):
+ if bittensor.__use_console__:
+ with bittensor.__console__.status("Synchronizing Metagraph...", spinner="earth"):
+ try:
+ neurons = self.retrieve_cached_neurons( )
+ except:
+ # For some reason IPFS cache is down, fallback on regular sync
+ logger.warning("IPFS cache may be down, falling back to regular sync")
+ neurons = self.subtensor.neurons()
+ n_total = len(neurons)
+ else:
+ try:
+ neurons = self.retrieve_cached_neurons( )
+ except:
+ # For some reason IPFS cache is down, fallback on regular sync
+ logger.warning("IPFS cache may be down, falling back to regular sync")
+ neurons = self.subtensor.neurons()
+ n_total = len(neurons)
+ else:
+ neurons = self.subtensor.neurons( block = block )
+ n_total = len(neurons)
+ else:
+ if cached and self.subtensor.network in ("nakamoto", "local"):
+ if bittensor.__use_console__:
+ with bittensor.__console__.status("Synchronizing Metagraph...", spinner="earth"):
+ try:
+ neurons = self.retrieve_cached_neurons( block = block )
+ except:
+ # For some reason IPFS cache is down, fallback on regular sync
+ logger.warning("IPFS cache may be down, falling back to regular sync to get block {}".format(block))
+ neurons = self.subtensor.neurons( block = block )
+ n_total = len(neurons)
+ else:
+ try:
+ neurons = self.retrieve_cached_neurons( block = block )
+ except:
+ # For some reason IPFS cache is down, fallback on regular sync
+ logger.warning("IPFS cache may be down, falling back to regular sync to get block {}".format(block))
+ neurons = self.subtensor.neurons( block = block )
+ n_total = len(neurons)
+ else:
+ neurons = self.subtensor.neurons( block = block )
+ n_total = len(neurons)
+
+ # Fill arrays.
+ uids = [ i for i in range(n_total) ]
+ active = [ 0 for _ in range(n_total) ]
+ stake = [ 0 for _ in range(n_total) ]
+ ranks = [ 0 for _ in range(n_total) ]
+ trust = [ 0 for _ in range(n_total) ]
+ consensus = [ 0 for _ in range(n_total) ]
+ incentive = [ 0 for _ in range(n_total) ]
+ emission = [ 0 for _ in range(n_total) ]
+ dividends = [ 0 for _ in range(n_total) ]
+ last_updates = [ -1 for _ in range(n_total) ]
+ endpoints = [ [-1 for _ in range(250) ] for _ in range(n_total) ]
+ weights = [ [ 0 for _ in range(n_total) ] for _ in range(n_total) ]
+ bonds = [ [0 for _ in range(n_total) ] for _ in range(n_total) ]
+ self._endpoint_objs = [ bittensor.endpoint.dummy() for _ in range(n_total) ]
+ self.neurons = [None for _ in range(n_total)]
+ for n in neurons:
+ self.neurons[n.uid] = n
+ uids[n.uid] = n.uid
+ active[n.uid] = n.active
+ stake[n.uid] = n.stake
+ ranks[n.uid] = n.rank
+ trust[n.uid] = n.trust
+ consensus[n.uid] = n.consensus
+ incentive[n.uid] = n.incentive
+ dividends[n.uid] = n.dividends
+ emission[n.uid] = n.emission
+ last_updates[n.uid] = n.last_update
+ endpoint = bittensor.endpoint(
+ version = int(n.version),
+ uid = int(n.uid),
+ hotkey = str(n.hotkey),
+ ip_type = int(n.ip_type),
+ ip = str(n.ip),
+ port = int(n.port),
+ modality = int(n.modality),
+ coldkey = str(n.coldkey)
+ )
+ self._endpoint_objs[n.uid] = endpoint
+ endpoints[n.uid] = endpoint.to_tensor().tolist()
+ if len(n.weights) > 0:
+ w_uids, w_weights = zip(*n.weights)
+ weights[n.uid] = weight_utils.convert_weight_uids_and_vals_to_tensor( n_total, w_uids, w_weights ).tolist()
+ else:
+ weights[n.uid] = [0] * n_total
+ if len(n.bonds) > 0:
+ b_uids, b_bonds = zip(*n.bonds)
+ bonds[n.uid] = weight_utils.convert_bond_uids_and_vals_to_tensor( n_total, b_uids, b_bonds ).tolist()
+ else:
+ bonds[n.uid] = [0] * n_total
+
+ # Set tensors.
+ tn = torch.tensor( n_total, dtype=torch.int64 )
+ tblock = torch.tensor( block, dtype=torch.int64 )
+ tuids = torch.tensor( uids, dtype=torch.int64 )
+ tactive = torch.tensor( active, dtype=torch.int64 )
+ tstake = torch.tensor( stake, dtype=torch.float32 )
+ tranks = torch.tensor( ranks, dtype=torch.float32 )
+ ttrust = torch.tensor( trust, dtype=torch.float32 )
+ tconsensus = torch.tensor( consensus, dtype=torch.float32 )
+ tincentive = torch.tensor( incentive, dtype=torch.float32 )
+ temission = torch.tensor( emission, dtype=torch.float32 )
+ tdividends = torch.tensor( dividends, dtype=torch.float32 )
+ tlast_update = torch.tensor( last_updates, dtype=torch.int64 )
+ tbonds = torch.tensor( bonds, dtype=torch.int64 )
+ tweights = torch.tensor( weights, dtype=torch.float32 )
+ tendpoints = torch.tensor( endpoints, dtype=torch.int64 )
+
+ # Normalize bond ownership.
+ tbonds = torch.nn.functional.normalize( tbonds.float(), p=1, dim=0, eps=1e-12 ) * 0.5 + torch.eye( tn ) * 0.5
+
+ # Set params.
+ self.n = torch.nn.Parameter( tn, requires_grad=False )
+ self.block = torch.nn.Parameter( tblock, requires_grad=False )
+ self.uids = torch.nn.Parameter( tuids, requires_grad=False )
+ self.stake = torch.nn.Parameter( tstake, requires_grad=False )
+ self.ranks = torch.nn.Parameter( tranks, requires_grad=False )
+ self.trust = torch.nn.Parameter( ttrust, requires_grad=False )
+ self.consensus = torch.nn.Parameter( tconsensus, requires_grad=False )
+ self.incentive = torch.nn.Parameter( tincentive, requires_grad=False )
+ self.emission = torch.nn.Parameter( temission, requires_grad=False )
+ self.dividends = torch.nn.Parameter( tdividends, requires_grad=False )
+ self.active = torch.nn.Parameter( tactive, requires_grad=False )
+ self.last_update = torch.nn.Parameter( tlast_update, requires_grad=False )
+ self.weights = torch.nn.Parameter( tweights, requires_grad=False )
+ self.bonds = torch.nn.Parameter( tbonds, requires_grad=False )
+ self.endpoints = torch.nn.Parameter( tendpoints, requires_grad=False )
+
+        # For constructor.
+ return self
+
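+    # Illustrative sync usage (hedged sketch; object names are hypothetical):
+    #
+    #   meta = bittensor.metagraph( subtensor = subtensor )
+    #   meta.sync()                                      # latest block, cached IPFS path on nakamoto/local
+    #   meta.sync( block = some_block, cached = False )  # sync directly from the chain at a given block
+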
+ def to_dataframe(self):
+ try:
+ index = self.uids.tolist()
+ columns = [ 'uid', 'active', 'stake', 'rank', 'trust', 'consensus', 'incentive', 'dividends', 'emission']
+ dataframe = pandas.DataFrame(columns = columns, index = index)
+ for uid in self.uids.tolist():
+ v = {
+ 'uid': self.uids[uid].item(),
+ 'active': self.active[uid].item(),
+ 'stake': self.stake[uid].item(),
+ 'rank': self.ranks[uid].item(),
+ 'trust': self.trust[uid].item(),
+ 'consensus': self.consensus[uid].item(),
+ 'incentive': self.incentive[uid].item(),
+ 'dividends': self.dividends[uid].item(),
+ 'emission': self.emission[uid].item()
+ }
+ dataframe.loc[uid] = pandas.Series( v )
+ dataframe['uid'] = dataframe.index
+ return dataframe
+ except Exception as e:
+ bittensor.logging.error('failed metagraph.to_dataframe()', str(e))
+ return pandas.DataFrame()
+
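+    # Illustrative sketch (hedged): inspect the synced state as a dataframe, e.g.
+    #   meta.to_dataframe().sort_values( 'stake', ascending = False ).head()
+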
+ def to_wandb(self):
+ wandb_info = {
+ 'metagraph_n': self.n.item(),
+ 'metagraph_tau': self.tau.item(),
+ 'metagraph_block': self.block.item(),
+ }
+ return wandb_info
+
+ def __str__(self):
+ return "Metagraph({}, {}, {})".format(self.n.item(), self.block.item(), self.subtensor.network)
+
+ def __repr__(self):
+ return self.__str__()
diff --git a/bittensor/_neuron/text/core_server/__init__.py b/bittensor/_neuron/text/core_server/__init__.py
index 4533d2e7cd..bfd207b43c 100644
--- a/bittensor/_neuron/text/core_server/__init__.py
+++ b/bittensor/_neuron/text/core_server/__init__.py
@@ -101,15 +101,6 @@ def __init__(
config.netuid = netuid if netuid != None else config.netuid
- subtensor = bittensor.subtensor ( config = config ) if subtensor == None else subtensor
- if config.netuid == None:
- config.netuid = subtensor.get_subnets()[0]
-
- # Verify subnet exists
- if not subtensor.subnet_exists( netuid = config.netuid ):
- bittensor.__console__.print(f"[red]Subnet {config.netuid} does not exist[/red]")
- sys.exit(1)
-
if synapse_list != None:
config.neuron.lasthidden = False
config.neuron.causallm = False
@@ -155,7 +146,7 @@ def __init__(
self.prometheus_info = Info('neuron_info', "Info sumamries for the running server-miner.", registry=registry)
self.config.to_prometheus()
- if self.config.netuid == None:
+ if self.config.netuid == None and self.config.subtensor.network == 'finney':
subtensor = bittensor.subtensor(config = config) if subtensor == None else subtensor
self.config.netuid = subtensor.get_subnets()[0]
@@ -182,7 +173,7 @@ def __init__(
self.query_data = {}
# Verify subnet exists
- if not self.subtensor.subnet_exists( netuid = self.config.netuid ):
+ if self.config.subtensor.network == 'finney' and not self.subtensor.subnet_exists( netuid = self.config.netuid ):
bittensor.__console__.print(f"[red]Subnet {self.config.netuid} does not exist[/red]")
sys.exit(1)
@@ -261,7 +252,7 @@ def run(
)
last_set_block = self.subtensor.get_current_block()
- blocks_per_set_weights = self.subtensor.validator_epoch_length(self.config.netuid) if self.config.neuron.blocks_per_set_weights == -1 else self.config.neuron.blocks_per_set_weights
+ blocks_per_set_weights = self.get_blocks_per_set_weights()
epoch_starting_successes = self.axon.stats.total_successes
epoch_starting_requests = self.axon.stats.total_requests
# --- Run Forever.
@@ -269,8 +260,7 @@ def run(
iteration = 0
local_data = {}
self.query_data = {}
-
- nn = self.subtensor.get_neuron_for_pubkey_and_subnet(self.wallet.hotkey.ss58_address, netuid = self.config.netuid)
+ nn = self.get_neuron()
uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address )
current_block = self.subtensor.get_current_block()
end_block = current_block + self.config.neuron.blocks_per_epoch
@@ -398,7 +388,7 @@ def run(
try:
# Set self weights to maintain activity.
# --- query the chain for the most current number of peers on the network
- chain_weights = torch.zeros(self.subtensor.subnetwork_n( netuid = self.config.netuid ))
+ chain_weights = torch.zeros(self.get_neuron_num())
chain_weights [ uid ] = 1
did_set = self.subtensor.set_weights(
uids=torch.arange(0,len(chain_weights)),
@@ -406,6 +396,7 @@ def run(
weights = chain_weights,
wait_for_inclusion = False,
wallet = self.wallet,
+ version_key =1
)
if did_set:
logger.success('Successfully set weights on the chain')
@@ -638,3 +629,27 @@ def time_check():
except Exception as e:
self.prometheus_counters.labels("blacklisted").inc()
return True
+
+ def get_neuron(self):
+ if self.subtensor.network == 'finney':
+ nn = self.subtensor.get_neuron_for_pubkey_and_subnet(self.wallet.hotkey.ss58_address, netuid = self.config.netuid)
+ elif self.subtensor.network == 'nakamoto':
+ nn = self.subtensor.neuron_for_pubkey(self.wallet.hotkey.ss58_address)
+ return nn
+
+ def get_neuron_num(self):
+ if self.subtensor.network == 'finney':
+ n = self.subtensor.subnetwork_n( netuid = self.config.netuid)
+ elif self.subtensor.network == 'nakamoto':
+ n = self.subtensor.n()
+ return n
+
+ def get_blocks_per_set_weights(self):
+ blocks_per_set_weights = self.config.neuron.blocks_per_set_weights
+ if blocks_per_set_weights == -1:
+ if self.subtensor.network == 'finney':
+ blocks_per_set_weights = self.subtensor.validator_epoch_length(self.config.netuid)
+ elif self.subtensor.network == 'nakamoto':
+ blocks_per_set_weights = self.subtensor.validator_epoch_length
+
+ return blocks_per_set_weights
\ No newline at end of file
diff --git a/bittensor/_neuron/text/core_validator/__init__.py b/bittensor/_neuron/text/core_validator/__init__.py
index 2e87e64ed2..a195e79042 100644
--- a/bittensor/_neuron/text/core_validator/__init__.py
+++ b/bittensor/_neuron/text/core_validator/__init__.py
@@ -102,11 +102,11 @@ def __init__(
config.netuid = netuid if netuid != None else config.netuid
subtensor = bittensor.subtensor ( config = config ) if subtensor == None else subtensor
- if config.netuid == None:
+ if config.subtensor.network == 'finney' and config.netuid == None:
config.netuid = subtensor.get_subnets()[0]
# Verify subnet exists
- if not subtensor.subnet_exists( netuid = config.netuid ):
+ if config.subtensor.network == 'finney' and not subtensor.subnet_exists( netuid = config.netuid ):
bittensor.__console__.print(f"[red]Subnet {config.netuid} does not exist[/red]")
sys.exit(1)
@@ -147,9 +147,15 @@ def __init__(
self.axon = bittensor.axon ( netuid=self.config.netuid, config = self.config, wallet = self.wallet ) if axon == None else axon
self.device = torch.device ( device = self.config.neuron.device )
self.nucleus = nucleus ( config = self.config, device = self.device, subtensor = self.subtensor, vlogger = self.vlogger ).to( self.device )
- self.dataset = (bittensor.dataset(config=self.config, batch_size=self.subtensor.validator_batch_size(self.config.netuid),
- block_size=self.subtensor.validator_sequence_length(self.config.netuid) + self.config.neuron.validation_len + self.subtensor.validator_prune_len(netuid=self.config.netuid))
- if dataset is None else dataset)
+ if self.config.subtensor.network == 'finney':
+ self.dataset = (bittensor.dataset(config=self.config, batch_size=self.subtensor.validator_batch_size(self.config.netuid),
+ block_size=self.subtensor.validator_sequence_length(self.config.netuid) + self.config.neuron.validation_len + self.subtensor.validator_prune_len(netuid=self.config.netuid))
+ if dataset is None else dataset)
+ else:
+ self.dataset = (bittensor.dataset(config=self.config, batch_size=self.subtensor.validator_batch_size,
+ block_size=self.subtensor.validator_sequence_length + self.config.neuron.validation_len + self.subtensor.validator_prune_len)
+ if dataset is None else dataset)
+
self.optimizer = torch.optim.SGD(
self.nucleus.parameters(), lr=self.config.neuron.learning_rate, momentum=self.config.neuron.momentum
)
@@ -371,19 +377,31 @@ def run_epoch( self ):
"""
# === Get params for epoch ===
# Pulling the latest chain parameters.
+ if self.config.subtensor.network == 'finney':
+ batch_size = self.subtensor.validator_batch_size(netuid=self.config.netuid)
+ sequence_length = self.subtensor.validator_sequence_length(netuid=self.config.netuid)
+ # Number of tokens to prune for phrase validation beyond sequence context
+ prune_len = self.config.neuron.prune_len = self.subtensor.validator_prune_len(netuid=self.config.netuid)
+ self.config.nucleus.logits_divergence = self.subtensor.validator_logits_divergence(netuid=self.config.netuid)
+ min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid)
+ max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid)
+ self.config.nucleus.scaling_law_power = self.subtensor.scaling_law_power(netuid=self.config.netuid)
+ self.config.nucleus.synergy_scaling_law_power = self.subtensor.synergy_scaling_law_power(netuid=self.config.netuid)
+ else:
+ batch_size = self.subtensor.validator_batch_size
+ sequence_length = self.subtensor.validator_sequence_length
+ # Number of tokens to prune for phrase validation beyond sequence context
+ prune_len = self.config.neuron.prune_len = self.subtensor.validator_prune_len
+ self.config.nucleus.logits_divergence = self.subtensor.validator_logits_divergence
+ min_allowed_weights = self.subtensor.min_allowed_weights
+ max_weight_limit = self.subtensor.max_weight_limit
+ self.config.nucleus.scaling_law_power = self.subtensor.scaling_law_power
+ self.config.nucleus.synergy_scaling_law_power = self.subtensor.synergy_scaling_law_power
+
current_block = self.subtensor.block
- batch_size = self.subtensor.validator_batch_size(netuid=self.config.netuid)
- sequence_length = self.subtensor.validator_sequence_length(netuid=self.config.netuid)
validation_len = self.config.neuron.validation_len # Number of tokens to holdout for phrase validation beyond sequence context
- # Number of tokens to prune for phrase validation beyond sequence context
- prune_len = self.config.neuron.prune_len = self.subtensor.validator_prune_len(netuid=self.config.netuid)
- self.config.nucleus.logits_divergence = self.subtensor.validator_logits_divergence(netuid=self.config.netuid)
- min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid)
- max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid)
- blocks_per_epoch = self.subtensor.validator_epoch_length(netuid=self.config.netuid) if self.config.neuron.blocks_per_epoch == -1 else self.config.neuron.blocks_per_epoch
- epochs_until_reset = self.subtensor.validator_epochs_per_reset(netuid=self.config.netuid) if self.config.neuron.epochs_until_reset == -1 else self.config.neuron.epochs_until_reset
- self.config.nucleus.scaling_law_power = self.subtensor.scaling_law_power(netuid=self.config.netuid)
- self.config.nucleus.synergy_scaling_law_power = self.subtensor.synergy_scaling_law_power(netuid=self.config.netuid)
+ epochs_until_reset = self.get_validator_epochs_per_reset() if self.config.neuron.epochs_until_reset == -1 else self.config.neuron.epochs_until_reset
+ blocks_per_epoch = self.get_validator_epoch_length()
# === Update dataset size ===
if (batch_size != self.dataset.batch_size) or (sequence_length + validation_len + prune_len != self.dataset.block_size):
@@ -515,8 +533,8 @@ def run_epoch( self ):
# console table - weight table (every validation step)
sample_uids, sample_weights = self.calculate_weights()
self.vlogger.print_weights_table(
- min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid),
- max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid),
+ min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.min_allowed_weights,
+ max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.max_weight_limit,
neuron_stats = self.neuron_stats,
title = str(self),
metagraph_n = self.metagraph.n,
@@ -553,22 +571,22 @@ def run_epoch( self ):
weights=sample_weights.detach().to('cpu'),
netuid = self.config.netuid,
wallet=self.wallet,
- version_key=bittensor.__version_as_int__, # TODO: correct?
+ version_key=1,
wait_for_finalization=self.config.neuron.wait_for_finalization,
)
# === ALL end of epoch logging (including console message, console table, prometheus, wandb)===
if self.config.logging.debug or self.config.logging.trace:
# console table - weight table (every end of epoch)
- self.vlogger.print_weights_table(
- min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid),
- max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid),
- neuron_stats = self.neuron_stats,
- title = str(self),
- metagraph_n = self.metagraph.n,
- sample_uids = sample_uids,
- sample_weights = sample_weights,
- )
+ self.vlogger.print_weights_table(
+ min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.min_allowed_weights,
+                        max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.max_weight_limit,
+ neuron_stats = self.neuron_stats,
+ title = str(self),
+ metagraph_n = self.metagraph.n,
+ sample_uids = sample_uids,
+ sample_weights = sample_weights,
+ )
# console message - subtensor weight (every end of epoch)
self.vlogger.print_console_subtensor_weight(
@@ -708,8 +726,9 @@ def calculate_weights(self):
weight_key = self.weight_key + '!' # use zeroing key to penalize non-responsive neurons
- min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid)
- max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid)
+ min_allowed_weights = self.subtensor.min_allowed_weights(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.min_allowed_weights
+ max_weight_limit = self.subtensor.max_weight_limit(netuid=self.config.netuid) if self.config.subtensor.network == 'finney' else self.subtensor.max_weight_limit
+
# === Populate neuron weights ===
neuron_weights = torch.zeros_like(self.metagraph.S) # allow unevaluated UIDs for min_allowed_weights
@@ -727,7 +746,11 @@ def calculate_weights(self):
# === Exclude lowest quantile from weight setting ===
max_exclude = (len(sample_weights) - min_allowed_weights) / len(sample_weights) # max excludable weight quantile
- quantile = self.subtensor.validator_exclude_quantile(netuid=self.config.netuid) if self.config.neuron.exclude_quantile == -1 else self.config.neuron.exclude_quantile
+
+ if self.config.subtensor.network == 'finney':
+ quantile = self.subtensor.validator_exclude_quantile(netuid=self.config.netuid) if self.config.neuron.exclude_quantile == -1 else self.config.neuron.exclude_quantile
+ else:
+ quantile = self.subtensor.validator_exclude_quantile if self.config.neuron.exclude_quantile == -1 else self.config.neuron.exclude_quantile
if 0 < max_exclude:
exclude_quantile = min([quantile , max_exclude]) # reduce quantile to meet min_allowed_weights
lowest_quantile = sample_weights.quantile(exclude_quantile) # find lowest quantile threshold
@@ -745,6 +768,16 @@ def calculate_weights(self):
return sample_uids, sample_weights
+ def get_validator_epoch_length(self):
+ validator_epoch_length = self.subtensor.validator_epoch_length(self.config.netuid) if self.subtensor.network == 'finney' else self.subtensor.validator_epoch_length
+
+ return validator_epoch_length
+
+ def get_validator_epochs_per_reset(self):
+ validator_epochs_per_reset = self.subtensor.validator_epochs_per_reset(self.config.netuid) if self.subtensor.network == 'finney' else self.subtensor.validator_epochs_per_reset
+
+ return validator_epochs_per_reset
+
class nucleus( torch.nn.Module ):
""" Nucleus class which holds the validator model.
"""
@@ -752,12 +785,19 @@ def __init__( self, config, device, subtensor, vlogger ):
super(nucleus, self).__init__()
self.config = config
self.vlogger = vlogger
- self.config.nucleus.logits_divergence = subtensor.validator_logits_divergence(netuid=self.config.netuid) if self.config.nucleus.logits_divergence == -1 else self.config.nucleus.logits_divergence
- self.config.nucleus.scaling_law_power = subtensor.scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.scaling_law_power == -1 else self.config.nucleus.scaling_law_power
- self.config.nucleus.synergy_scaling_law_power = subtensor.synergy_scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.synergy_scaling_law_power == -1 else self.config.nucleus.synergy_scaling_law_power
+
+ if self.config.subtensor.network == 'finney':
+ self.config.nucleus.logits_divergence = subtensor.validator_logits_divergence(netuid=self.config.netuid) if self.config.nucleus.logits_divergence == -1 else self.config.nucleus.logits_divergence
+ self.config.nucleus.scaling_law_power = subtensor.scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.scaling_law_power == -1 else self.config.nucleus.scaling_law_power
+ self.config.nucleus.synergy_scaling_law_power = subtensor.synergy_scaling_law_power(netuid=self.config.netuid) if self.config.nucleus.synergy_scaling_law_power == -1 else self.config.nucleus.synergy_scaling_law_power
+ self.max_n = subtensor.max_n(netuid=self.config.netuid)
+ else:
+ self.config.nucleus.logits_divergence = subtensor.validator_logits_divergence if self.config.nucleus.logits_divergence == -1 else self.config.nucleus.logits_divergence
+ self.config.nucleus.scaling_law_power = subtensor.scaling_law_power if self.config.nucleus.scaling_law_power == -1 else self.config.nucleus.scaling_law_power
+ self.config.nucleus.synergy_scaling_law_power = subtensor.synergy_scaling_law_power if self.config.nucleus.synergy_scaling_law_power == -1 else self.config.nucleus.synergy_scaling_law_power
+ self.max_n = subtensor.max_n
self.device = device
- self.max_n = subtensor.max_n(netuid=self.config.netuid)
self.permute_uids = [] # iterable of next UIDs to query, reset to permuted UIDs when empty
tokenizer = bittensor.tokenizer()
@@ -1533,3 +1573,5 @@ def unsuccess(_name, _unsuccessful):
for _uid, _return_op, _time in _unsuccessful:
unsuccess_txt += f'{_uid}[{_return_op} {_time:.2f}] '
logger.info(unsuccess_txt)
+
+
diff --git a/bittensor/_subtensor/__init__.py b/bittensor/_subtensor/__init__.py
index 268dcadc53..9c8b747e00 100644
--- a/bittensor/_subtensor/__init__.py
+++ b/bittensor/_subtensor/__init__.py
@@ -26,7 +26,7 @@
from torch.cuda import is_available as is_cuda_available
from bittensor.utils import strtobool_with_default
-
+from .naka_subtensor_impl import Subtensor as Nakamoto_subtensor
from . import subtensor_impl, subtensor_mock
logger = logger.opt(colors=True)
@@ -117,11 +117,19 @@ def __new__(
)
subtensor.check_config( config )
- return subtensor_impl.Subtensor(
- substrate = substrate,
- network = config.subtensor.get('network', bittensor.defaults.subtensor.network),
- chain_endpoint = config.subtensor.chain_endpoint,
- )
+ network = config.subtensor.get('network', bittensor.defaults.subtensor.network)
+ if network == 'nakamoto':
+ return Nakamoto_subtensor(
+ substrate = substrate,
+ network = config.subtensor.get('network', bittensor.defaults.subtensor.network),
+ chain_endpoint = config.subtensor.chain_endpoint,
+ )
+ elif network =='finney':
+ return subtensor_impl.Subtensor(
+ substrate = substrate,
+ network = config.subtensor.get('network', bittensor.defaults.subtensor.network),
+ chain_endpoint = config.subtensor.chain_endpoint,
+ )
@staticmethod
def config() -> 'bittensor.Config':
diff --git a/bittensor/_subtensor/naka_subtensor_impl.py b/bittensor/_subtensor/naka_subtensor_impl.py
new file mode 100644
index 0000000000..763c632c62
--- /dev/null
+++ b/bittensor/_subtensor/naka_subtensor_impl.py
@@ -0,0 +1,1762 @@
+# The MIT License (MIT)
+# Copyright © 2021 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+import torch
+from rich.prompt import Confirm, Prompt
+from typing import List, Dict, Union, Optional
+from multiprocessing import Process
+
+import bittensor
+from tqdm import tqdm
+import bittensor.utils.networking as net
+import bittensor.utils.weight_utils as weight_utils
+from retry import retry
+from substrateinterface import SubstrateInterface
+from bittensor.utils.balance import Balance
+from bittensor.utils import is_valid_bittensor_address_or_public_key
+from bittensor.utils.registratrion_old import create_pow
+from types import SimpleNamespace
+
+# Mocking imports
+import os
+import random
+import scalecodec
+import time
+import subprocess
+from sys import platform
+
+from loguru import logger
+logger = logger.opt(colors=True)
+
+class Subtensor:
+ """
+ Handles interactions with the subtensor chain.
+ """
+ def __init__(
+ self,
+ substrate: 'SubstrateInterface',
+ network: str,
+ chain_endpoint: str,
+ ):
+ r""" Initializes a subtensor chain interface.
+ Args:
+ substrate (:obj:`SubstrateInterface`, `required`):
+ substrate websocket client.
+ network (default='local', type=str)
+ The subtensor network flag. The likely choices are:
+ -- local (local running network)
+ -- nobunaga (staging network)
+ -- nakamoto (main network)
+                If this option is set, it overrides subtensor.chain_endpoint with
+ an entry point node from that network.
+ chain_endpoint (default=None, type=str)
+ The subtensor endpoint flag. If set, overrides the network argument.
+ """
+ self.network = network
+ self.chain_endpoint = chain_endpoint
+ self.substrate = substrate
+
+ def __str__(self) -> str:
+ if self.network == self.chain_endpoint:
+ # Connecting to chain endpoint without network known.
+ return "Subtensor({})".format( self.chain_endpoint )
+ else:
+ # Connecting to network with endpoint known.
+ return "Subtensor({}, {})".format( self.network, self.chain_endpoint )
+
+ def __repr__(self) -> str:
+ return self.__str__()
+
+ def endpoint_for_network(
+ self,
+ blacklist: List[str] = []
+ ) -> str:
+ r""" Returns a chain endpoint based on self.network.
+ Returns None if there are no available endpoints.
+ """
+
+ # Chain endpoint overrides the --network flag.
+ if self.chain_endpoint != None:
+ if self.chain_endpoint in blacklist:
+ return None
+ else:
+ return self.chain_endpoint
+
+ def connect( self, timeout: int = 10, failure = True ) -> bool:
+ attempted_endpoints = []
+ while True:
+ def connection_error_message():
+ print('''
+ Check that your internet connection is working and the chain endpoints are available: {}
+ The subtensor.network should likely be one of the following choices:
+ -- local - (your locally running node)
+ -- nobunaga - (staging)
+ -- nakamoto - (main)
+ Or you may set the endpoint manually using the --subtensor.chain_endpoint flag
+ To run a local node (See: docs/running_a_validator.md) \n
+ '''.format( attempted_endpoints) )
+
+ # ---- Get next endpoint ----
+ ws_chain_endpoint = self.endpoint_for_network( blacklist = attempted_endpoints )
+ if ws_chain_endpoint == None:
+ logger.error("No more endpoints available for subtensor.network: {}, attempted: {}".format(self.network, attempted_endpoints))
+ connection_error_message()
+ if failure:
+ logger.critical('Unable to connect to network:{}.\nMake sure your internet connection is stable and the network is properly set.'.format(self.network))
+ else:
+ return False
+ attempted_endpoints.append(ws_chain_endpoint)
+
+ # --- Attempt connection ----
+ try:
+ with self.substrate:
+ logger.success("Network:".ljust(20) + "{}", self.network)
+ logger.success("Endpoint:".ljust(20) + "{}", ws_chain_endpoint)
+ return True
+
+ except Exception:
+ logger.error( "Error while connecting to network:{} at endpoint: {}".format(self.network, ws_chain_endpoint))
+ connection_error_message()
+ if failure:
+ raise RuntimeError('Unable to connect to network:{}.\nMake sure your internet connection is stable and the network is properly set.'.format(self.network))
+ else:
+ return False
+
+ @property
+ def rho (self) -> int:
+ r""" Incentive mechanism rho parameter.
+ Returns:
+ rho (int):
+ Incentive mechanism rho parameter.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'Rho' ).value
+ return make_substrate_call_with_retry()
+
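+    # The chain-parameter properties below all follow the same pattern: a single
+    # storage query on SubtensorModule wrapped in an exponential-backoff @retry.
+    # Illustrative usage (hedged sketch; the handle name is hypothetical):
+    #
+    #   subtensor = bittensor.subtensor( network = 'nakamoto' )
+    #   print( subtensor.rho, subtensor.kappa, subtensor.difficulty )
+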
+ @property
+ def kappa (self) -> int:
+ r""" Incentive mechanism kappa parameter.
+ Returns:
+ kappa (int):
+ Incentive mechanism kappa parameter.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'Kappa' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def difficulty (self) -> int:
+ r""" Returns registration difficulty from the chain.
+ Returns:
+ difficulty (int):
+ Registration difficulty.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'Difficulty' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def total_issuance (self) -> 'bittensor.Balance':
+ r""" Returns the total token issuance.
+ Returns:
+ total_issuance (int):
+ Total issuance as balance.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return bittensor.Balance.from_rao( substrate.query( module='SubtensorModule', storage_function = 'TotalIssuance').value )
+ return make_substrate_call_with_retry()
+
+ @property
+ def immunity_period (self) -> int:
+ r""" Returns the chain registration immunity_period
+ Returns:
+ immunity_period (int):
+ Chain registration immunity_period
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'ImmunityPeriod' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def validator_batch_size (self) -> int:
+ r""" Returns the chain default validator batch size.
+ Returns:
+ batch_size (int):
+ Chain default validator batch size.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'ValidatorBatchSize' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def validator_sequence_length (self) -> int:
+ r""" Returns the chain default validator sequence length.
+ Returns:
+ sequence_length (int):
+ Chain default validator sequence length.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'ValidatorSequenceLength' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def validator_epochs_per_reset (self) -> int:
+ r""" Epochs passed before the validator resets its weights.
+ Returns:
+ validator_epochs_per_reset (int):
+ Epochs passed before the validator resets its weights.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'ValidatorEpochsPerReset' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def validator_epoch_length (self) -> int:
+ r""" Default validator epoch length.
+ Returns:
+ validator_epoch_length (int):
+ Default validator epoch length.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'ValidatorEpochLen' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def total_stake (self) -> 'bittensor.Balance':
+ r""" Returns total stake on the chain.
+ Returns:
+ total_stake (bittensor.Balance):
+ Total stake as balance.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return bittensor.Balance.from_rao( substrate.query( module='SubtensorModule', storage_function = 'TotalStake' ).value )
+ return make_substrate_call_with_retry()
+
+ @property
+ def min_allowed_weights (self) -> int:
+ r""" Returns min allowed number of weights.
+ Returns:
+ min_allowed_weights (int):
+ Min number of weights allowed to be set.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'MinAllowedWeights' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+    def max_weight_limit (self) -> float:
+        r""" Returns MaxWeightLimit
+        Returns:
+            max_weight_limit (float):
+                the max value for weights after normalization
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ U32_MAX = 4294967295
+ return substrate.query( module='SubtensorModule', storage_function = 'MaxWeightLimit' ).value/U32_MAX
+ return make_substrate_call_with_retry()
+
+ @property
+    def scaling_law_power (self) -> float:
+ r""" Returns ScalingLawPower
+ Returns:
+ ScalingLawPower (float):
+ the power term attached to scaling law
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ MAX = 100
+ return substrate.query( module='SubtensorModule', storage_function = 'ScalingLawPower' ).value/MAX
+ return make_substrate_call_with_retry()
+
+ @property
+    def synergy_scaling_law_power (self) -> float:
+ r""" Returns SynergyScalingLawPower
+ Returns:
+ SynergyScalingLawPower (float):
+ the term attached to synergy calculation during shapley scores
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ MAX = 100
+ return substrate.query( module='SubtensorModule', storage_function = 'SynergyScalingLawPower' ).value/MAX
+ return make_substrate_call_with_retry()
+
+ @property
+    def validator_exclude_quantile (self) -> float:
+ r""" Returns ValidatorExcludeQuantile
+ Returns:
+ ValidatorExcludeQuantile (float):
+ the quantile that validators should exclude when setting their weights
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ MAX = 100
+ return substrate.query( module='SubtensorModule', storage_function = 'ValidatorExcludeQuantile' ).value/MAX
+ return make_substrate_call_with_retry()
+
+ @property
+ def max_allowed_min_max_ratio(self) -> int:
+ r""" Returns the chains max_allowed_min_max_ratio
+ Returns:
+ max_allowed_min_max_ratio (int):
+ The max ratio allowed between the min and max.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'MaxAllowedMaxMinRatio' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def n (self) -> int:
+ r""" Returns total number of neurons on the chain.
+ Returns:
+ n (int):
+ Total number of neurons on chain.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'N' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def max_n (self) -> int:
+ r""" Returns maximum number of neuron positions on the graph.
+ Returns:
+ max_n (int):
+ Maximum number of neuron positions on the graph.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'MaxAllowedUids' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def block (self) -> int:
+ r""" Returns current chain block.
+ Returns:
+ block (int):
+ Current chain block.
+ """
+ return self.get_current_block()
+
+ @property
+ def blocks_since_epoch (self) -> int:
+ r""" Returns blocks since last epoch.
+ Returns:
+ blocks_since_epoch (int):
+ blocks_since_epoch
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'BlocksSinceLastStep' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def blocks_per_epoch (self) -> int:
+ r""" Returns blocks per chain epoch.
+ Returns:
+ blocks_per_epoch (int):
+ blocks_per_epoch
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'BlocksPerStep' ).value
+ return make_substrate_call_with_retry()
+
+ def get_n (self, block: int = None) -> int:
+ r""" Returns total number of neurons on the chain.
+ Returns:
+ n (int):
+ Total number of neurons on chain.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query(
+ module='SubtensorModule',
+ storage_function = 'N',
+ block_hash = None if block == None else substrate.get_block_hash( block )
+ ).value
+ return make_substrate_call_with_retry()
+
+ @property
+ def validator_prune_len (self) -> int:
+ r""" Returns PruneLen
+ Returns:
+ prune_len (int):
+                the number of tokens pruned from each request
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query( module='SubtensorModule', storage_function = 'ValidatorPruneLen' ).value
+ return make_substrate_call_with_retry()
+
+ @property
+    def validator_logits_divergence (self) -> float:
+        r""" Returns logits_divergence
+        Returns:
+            logits_divergence (float):
+                the divergence value for logit distances, a measure for anomaly detection
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ U64MAX = 18446744073709551615
+ return substrate.query( module='SubtensorModule', storage_function = 'ValidatorLogitsDivergence' ).value/U64MAX
+ return make_substrate_call_with_retry()
+
+ def serve_axon (
+ self,
+ axon: 'bittensor.Axon',
+ use_upnpc: bool = False,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ prompt: bool = False,
+ ) -> bool:
+ r""" Serves the axon to the network.
+ Args:
+ axon (bittensor.Axon):
+ Axon to serve.
+ use_upnpc (:type:bool, `optional`):
+                If true, the axon attempts port forwarding through your router before
+ subscribing.
+ wait_for_inclusion (bool):
+ If set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ If set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+                flag is true if the extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
+ axon.wallet.hotkey
+ axon.wallet.coldkeypub
+
+ # ---- Setup UPNPC ----
+ if use_upnpc:
+ if prompt:
+ if not Confirm.ask("Attempt port forwarding with upnpc?"):
+ return False
+ try:
+ external_port = net.upnpc_create_port_map( port = axon.port )
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Forwarded port: {}[/green]".format( axon.port ))
+ bittensor.logging.success(prefix = 'Forwarded port', sufix = '{}'.format( axon.port ))
+ except net.UPNPCException as upnpc_exception:
+ raise RuntimeError('Failed to hole-punch with upnpc with exception {}'.format( upnpc_exception )) from upnpc_exception
+ else:
+ external_port = axon.external_port
+
+ # ---- Get external ip ----
+ if axon.external_ip == None:
+ try:
+ external_ip = net.get_external_ip()
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Found external ip: {}[/green]".format( external_ip ))
+ bittensor.logging.success(prefix = 'External IP', sufix = '{}'.format( external_ip ))
+ except Exception as E:
+                raise RuntimeError('Unable to obtain your external ip. Check your internet connection. error: {}'.format(E)) from E
+ else:
+ external_ip = axon.external_ip
+
+ # ---- Subscribe to chain ----
+ serve_success = self.serve(
+ wallet = axon.wallet,
+ ip = external_ip,
+ port = external_port,
+ modality = 0,
+ wait_for_inclusion = wait_for_inclusion,
+ wait_for_finalization = wait_for_finalization,
+ prompt = prompt
+ )
+ return serve_success
+
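+    # Illustrative sketch of serving an axon (hedged example; wallet and axon names
+    # are hypothetical):
+    #
+    #   wallet = bittensor.wallet( name = 'default', hotkey = 'default' )
+    #   axon = bittensor.axon( wallet = wallet )
+    #   subtensor.serve_axon( axon = axon, use_upnpc = False, wait_for_inclusion = True )
+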
+ def register (
+ self,
+ wallet: 'bittensor.Wallet',
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ prompt: bool = False,
+ max_allowed_attempts: int = 3,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[List[int], int] = 0,
+ TPB: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ log_verbose: bool = False,
+ netuid: int = None,
+ ) -> bool:
+ r""" Registers the wallet to chain.
+ Args:
+ wallet (bittensor.wallet):
+ bittensor wallet object.
+ wait_for_inclusion (bool):
+ If set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ If set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ max_allowed_attempts (int):
+ Maximum number of attempts to register the wallet.
+ cuda (bool):
+ If true, the wallet should be registered using CUDA device(s).
+ dev_id (Union[List[int], int]):
+ The CUDA device id to use, or a list of device ids.
+ TPB (int):
+ The number of threads per block (CUDA).
+ num_processes (int):
+ The number of processes to use to register.
+ update_interval (int):
+ The number of nonces to solve between updates.
+ log_verbose (bool):
+ If true, the registration process will log more information.
+ Returns:
+ success (bool):
+                flag is true if the extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
+
+ with bittensor.__console__.status(":satellite: Checking Account..."):
+ neuron = self.neuron_for_pubkey( wallet.hotkey.ss58_address )
+ if not neuron.is_null:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Already Registered[/green]:\n uid: [bold white]{}[/bold white]\n hotkey: [bold white]{}[/bold white]\n coldkey: [bold white]{}[/bold white]".format(neuron.uid, neuron.hotkey, neuron.coldkey))
+ return True
+
+ if prompt:
+ if not Confirm.ask("Continue Registration?\n hotkey: [bold white]{}[/bold white]\n coldkey: [bold white]{}[/bold white]\n network: [bold white]{}[/bold white]".format( wallet.hotkey.ss58_address, wallet.coldkeypub.ss58_address, self.network ) ):
+ return False
+
+ # Attempt rolling registration.
+ attempts = 1
+ while True:
+ bittensor.__console__.print(":satellite: Registering...({}/{})".format(attempts, max_allowed_attempts))
+ # Solve latest POW.
+ if cuda:
+ if not torch.cuda.is_available():
+ if prompt:
+                        bittensor.__console__.print(':cross_mark: [red]CUDA is not available.[/red]')
+ return False
+ pow_result = create_pow( self, wallet, output_in_place, cuda, dev_id, TPB, num_processes=num_processes, update_interval=update_interval, log_verbose=log_verbose )
+ else:
+ pow_result = create_pow( self, wallet, output_in_place, num_processes=num_processes, update_interval=update_interval, log_verbose=log_verbose )
+
+ # pow failed
+ if not pow_result:
+ # might be registered already
+ if (wallet.is_registered( self )):
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Registered[/green]")
+ return True
+
+ # pow successful, proceed to submit pow to chain for registration
+ else:
+ with bittensor.__console__.status(":satellite: Submitting POW..."):
+ # check if pow result is still valid
+ while bittensor.utils.POWNotStale(self, pow_result):
+ with self.substrate as substrate:
+ # create extrinsic call
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='register',
+ call_params={
+ 'block_number': pow_result['block_number'],
+ 'nonce': pow_result['nonce'],
+ 'work': bittensor.utils.hex_bytes_to_u8_list( pow_result['work'] ),
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'coldkey': wallet.coldkeypub.ss58_address
+ }
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.hotkey )
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization )
+
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Sent[/green]")
+ return True
+
+ # process if registration successful, try again if pow is still valid
+ response.process_events()
+ if not response.is_success:
+ if 'key is already registered' in response.error_message:
+ # Error meant that the key is already registered.
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Already Registered[/green]")
+ return True
+
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(response.error_message))
+ time.sleep(0.5)
+
+ # Successful registration, final check for neuron and pubkey
+ else:
+ bittensor.__console__.print(":satellite: Checking Balance...")
+ neuron = self.neuron_for_pubkey( wallet.hotkey.ss58_address )
+ if not neuron.is_null:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Registered[/green]")
+ return True
+ else:
+ # neuron not found, try again
+ bittensor.__console__.print(":cross_mark: [red]Unknown error. Neuron not found.[/red]")
+ continue
+ else:
+ # Exited loop because pow is no longer valid.
+ bittensor.__console__.print( "[red]POW is stale.[/red]" )
+ # Try again.
+ continue
+
+ if attempts < max_allowed_attempts:
+ #Failed registration, retry pow
+ attempts += 1
+ bittensor.__console__.print( ":satellite: Failed registration, retrying pow ...({}/{})".format(attempts, max_allowed_attempts))
+ else:
+ # Failed to register after max attempts.
+ bittensor.__console__.print( "[red]No more attempts.[/red]" )
+ return False
+
+ def serve (
+ self,
+ wallet: 'bittensor.wallet',
+ ip: str,
+ port: int,
+ modality: int,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization = True,
+ prompt: bool = False,
+ netuid: int = None,
+ ) -> bool:
+        r""" Subscribes a bittensor endpoint to the subtensor chain.
+ Args:
+ wallet (bittensor.wallet):
+ bittensor wallet object.
+            ip (str):
+                Endpoint host IP address, e.g. 192.122.31.4.
+            port (int):
+                Endpoint port number, e.g. 9221.
+            modality (int):
+                Int encoded endpoint modality, e.g. 0 for TEXT.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+                flag is true if extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
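+        # Hedged usage sketch (illustrative ip/port values): serve a TEXT-modality axon,
+        # where modality 0 mirrors the docstring above.
+        #   subtensor.serve( wallet = wallet, ip = '192.122.31.4', port = 9221, modality = 0, wait_for_inclusion = True )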
+
+ # Decrypt hotkey
+ wallet.hotkey
+
+ params = {
+ 'version': bittensor.__version_as_int__,
+ 'ip': net.ip_to_int(ip),
+ 'port': port,
+ 'ip_type': net.ip_version(ip),
+ 'modality': modality,
+ 'coldkey': wallet.coldkeypub.ss58_address,
+ }
+
+ with bittensor.__console__.status(":satellite: Checking Axon..."):
+ neuron = self.neuron_for_pubkey( wallet.hotkey.ss58_address )
+ neuron_up_to_date = not neuron.is_null and params == {
+ 'version': neuron.version,
+ 'ip': neuron.ip,
+ 'port': neuron.port,
+ 'ip_type': neuron.ip_type,
+ 'modality': neuron.modality,
+ 'coldkey': neuron.coldkey
+ }
+ if neuron_up_to_date:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Already Served[/green]\n [bold white]ip: {}\n port: {}\n modality: {}\n hotkey: {}\n coldkey: {}[/bold white]".format(ip, port, modality, wallet.hotkey.ss58_address, wallet.coldkeypub.ss58_address))
+ return True
+
+ if prompt:
+ if not Confirm.ask("Do you want to serve axon:\n [bold white]ip: {}\n port: {}\n modality: {}\n hotkey: {}\n coldkey: {}[/bold white]".format(ip, port, modality, wallet.hotkey.ss58_address, wallet.coldkeypub.ss58_address)):
+ return False
+
+ with bittensor.__console__.status(":satellite: Serving axon on: [white]{}[/white] ...".format(self.network)):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='serve_axon',
+ call_params=params
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.hotkey)
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization )
+ if wait_for_inclusion or wait_for_finalization:
+ response.process_events()
+ if response.is_success:
+ bittensor.__console__.print(':white_heavy_check_mark: [green]Served[/green]\n [bold white]ip: {}\n port: {}\n modality: {}\n hotkey: {}\n coldkey: {}[/bold white]'.format(ip, port, modality, wallet.hotkey.ss58_address, wallet.coldkeypub.ss58_address ))
+ return True
+ else:
+                        bittensor.__console__.print(':cross_mark: [red]Failed to Subscribe[/red] error: {}'.format(response.error_message))
+ return False
+ else:
+ return True
+
+ def add_stake(
+ self,
+ wallet: 'bittensor.wallet',
+ amount: Union[Balance, float] = None,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ prompt: bool = False,
+ ) -> bool:
+        r""" Adds the specified amount of stake to the passed hotkey uid.
+ Args:
+ wallet (bittensor.wallet):
+ Bittensor wallet object.
+ amount (Union[Balance, float]):
+ Amount to stake as bittensor balance, or float interpreted as Tao.
+ wait_for_inclusion (bool):
+ If set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ If set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+                flag is true if extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
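+        # Hedged usage sketch (illustrative amounts): a float is read as Tao, a Balance is
+        # used as-is, and amount = None stakes the whole coldkey balance.
+        #   subtensor.add_stake( wallet = wallet, amount = 1.5, prompt = True )
+        #   subtensor.add_stake( wallet = wallet, amount = bittensor.Balance.from_tao( 1.5 ) )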
+ # Decrypt keys,
+ wallet.coldkey
+ wallet.hotkey
+
+ with bittensor.__console__.status(":satellite: Syncing with chain: [white]{}[/white] ...".format(self.network)):
+ old_balance = self.get_balance( wallet.coldkey.ss58_address )
+ neuron = self.neuron_for_pubkey( ss58_hotkey = wallet.hotkey.ss58_address )
+ if neuron.is_null:
+ bittensor.__console__.print(":cross_mark: [red]Hotkey: {} is not registered.[/red]".format(wallet.hotkey_str))
+ return False
+
+        # Convert to bittensor.Balance
+ if amount == None:
+ # Stake it all.
+ staking_balance = bittensor.Balance.from_tao( old_balance.tao )
+ elif not isinstance(amount, bittensor.Balance ):
+ staking_balance = bittensor.Balance.from_tao( amount )
+ else:
+ staking_balance = amount
+
+ # Remove existential balance to keep key alive.
+ if staking_balance > bittensor.Balance.from_rao( 1000 ):
+ staking_balance = staking_balance - bittensor.Balance.from_rao( 1000 )
+ else:
+ staking_balance = staking_balance
+
+ # Estimate transfer fee.
+ staking_fee = None # To be filled.
+ with bittensor.__console__.status(":satellite: Estimating Staking Fees..."):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='add_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_staked': staking_balance.rao
+ }
+ )
+ payment_info = substrate.get_payment_info(call = call, keypair = wallet.coldkey)
+ if payment_info:
+ staking_fee = bittensor.Balance.from_rao(payment_info['partialFee'])
+ bittensor.__console__.print("[green]Estimated Fee: {}[/green]".format( staking_fee ))
+ else:
+ staking_fee = bittensor.Balance.from_tao( 0.2 )
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: could not estimate staking fee, assuming base fee of 0.2")
+
+        # Check enough to stake.
+ if staking_balance > old_balance + staking_fee:
+ bittensor.__console__.print(":cross_mark: [red]Not enough stake[/red]:[bold white]\n balance:{}\n amount: {}\n fee: {}\n coldkey: {}[/bold white]".format(old_balance, staking_balance, staking_fee, wallet.name))
+ return False
+
+ # Ask before moving on.
+ if prompt:
+ if not Confirm.ask("Do you want to stake:[bold white]\n amount: {}\n to: {}\n fee: {}[/bold white]".format( staking_balance, wallet.hotkey_str, staking_fee) ):
+ return False
+
+ with bittensor.__console__.status(":satellite: Staking to: [bold white]{}[/bold white] ...".format(self.network)):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='add_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_staked': staking_balance.rao
+ }
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.coldkey )
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Sent[/green]")
+ return True
+
+ if response.is_success:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Finalized[/green]")
+ else:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(response.error_message))
+
+ if response.is_success:
+ with bittensor.__console__.status(":satellite: Checking Balance on: [white]{}[/white] ...".format(self.network)):
+ new_balance = self.get_balance( wallet.coldkey.ss58_address )
+ old_stake = bittensor.Balance.from_tao( neuron.stake )
+ new_stake = bittensor.Balance.from_tao( self.neuron_for_pubkey( ss58_hotkey = wallet.hotkey.ss58_address ).stake)
+ bittensor.__console__.print("Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( old_balance, new_balance ))
+ bittensor.__console__.print("Stake:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( old_stake, new_stake ))
+ return True
+
+ return False
+
+ def add_stake_multiple (
+ self,
+ wallets: List['bittensor.wallet'],
+ amounts: List[Union[Balance, float]] = None,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ prompt: bool = False,
+ ) -> bool:
+ r""" Adds stake to each wallet hotkey in the list, using each amount, from the common coldkey.
+ Args:
+ wallets (List[bittensor.wallet]):
+ List of wallets to stake.
+ amounts (List[Union[Balance, float]]):
+ List of amounts to stake. If None, stake all to the first hotkey.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+ flag is true if extrinsic was finalized or included in the block.
+ flag is true if any wallet was staked.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
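+        # Hedged usage sketch (illustrative wallets/amounts): every hotkey must share the
+        # coldkey of the first wallet, and amounts pair positionally with wallets.
+        #   hot_wallets = [ bittensor.wallet( name = 'my_coldkey', hotkey = hk ) for hk in ('hk0', 'hk1') ]
+        #   subtensor.add_stake_multiple( wallets = hot_wallets, amounts = [1.0, 2.0] )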
+ if not isinstance(wallets, list):
+ raise TypeError("wallets must be a list of bittensor.wallet")
+
+ if len(wallets) == 0:
+ return True
+
+
+ if amounts is not None and len(amounts) != len(wallets):
+ raise ValueError("amounts must be a list of the same length as wallets")
+
+ if amounts is not None and not all(isinstance(amount, (Balance, float)) for amount in amounts):
+ raise TypeError("amounts must be a [list of bittensor.Balance or float] or None")
+
+ if amounts is None:
+ amounts = [None] * len(wallets)
+ else:
+ # Convert to Balance
+ amounts = [bittensor.Balance.from_tao(amount) if isinstance(amount, float) else amount for amount in amounts ]
+
+ if sum(amount.tao for amount in amounts) == 0:
+ # Staking 0 tao
+ return True
+
+ wallet_0: 'bittensor.wallet' = wallets[0]
+ # Decrypt coldkey for all wallet(s) to use
+ wallet_0.coldkey
+
+ neurons = []
+ with bittensor.__console__.status(":satellite: Syncing with chain: [white]{}[/white] ...".format(self.network)):
+ old_balance = self.get_balance( wallet_0.coldkey.ss58_address )
+
+ for wallet in wallets:
+ neuron = self.neuron_for_pubkey( ss58_hotkey = wallet.hotkey.ss58_address )
+
+ if neuron.is_null:
+ neurons.append( None )
+ continue
+
+ neurons.append( neuron )
+
+ # Remove existential balance to keep key alive.
+ ## Keys must maintain a balance of at least 1000 rao to stay alive.
+ total_staking_rao = sum([amount.rao if amount is not None else 0 for amount in amounts])
+ if total_staking_rao == 0:
+ # Staking all to the first wallet.
+ if old_balance.rao > 1000:
+ old_balance -= bittensor.Balance.from_rao(1000)
+
+ elif total_staking_rao < 1000:
+ # Staking less than 1000 rao to the wallets.
+ pass
+ else:
+ # Staking more than 1000 rao to the wallets.
+ ## Reduce the amount to stake to each wallet to keep the balance above 1000 rao.
+ percent_reduction = 1 - (1000 / total_staking_rao)
+ amounts = [Balance.from_tao(amount.tao * percent_reduction) for amount in amounts]
+
+ successful_stakes = 0
+ for wallet, amount, neuron in zip(wallets, amounts, neurons):
+ if neuron is None:
+ bittensor.__console__.print(":cross_mark: [red]Hotkey: {} is not registered. Skipping ...[/red]".format( wallet.hotkey_str ))
+ continue
+
+ if wallet.coldkeypub.ss58_address != wallet_0.coldkeypub.ss58_address:
+ bittensor.__console__.print(":cross_mark: [red]Hotkey: {} is not under the same coldkey. Skipping ...[/red]".format( wallet.hotkey_str ))
+ continue
+
+ # Assign decrypted coldkey from wallet_0
+ # so we don't have to decrypt again
+ wallet._coldkey = wallet_0.coldkey
+ staking_all = False
+ # Convert to bittensor.Balance
+ if amount == None:
+ # Stake it all.
+ staking_balance = bittensor.Balance.from_tao( old_balance.tao )
+ staking_all = True
+ else:
+ # Amounts are cast to balance earlier in the function
+ assert isinstance(amount, bittensor.Balance)
+ staking_balance = amount
+
+ # Estimate staking fee.
+ stake_fee = None # To be filled.
+ with bittensor.__console__.status(":satellite: Estimating Staking Fees..."):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='add_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_staked': staking_balance.rao
+ }
+ )
+ payment_info = substrate.get_payment_info(call = call, keypair = wallet.coldkey)
+ if payment_info:
+ stake_fee = bittensor.Balance.from_rao(payment_info['partialFee'])
+ bittensor.__console__.print("[green]Estimated Fee: {}[/green]".format( stake_fee ))
+ else:
+ stake_fee = bittensor.Balance.from_tao( 0.2 )
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: could not estimate staking fee, assuming base fee of 0.2")
+
+ # Check enough to stake
+ if staking_all:
+ staking_balance -= stake_fee
+                staking_balance = max(staking_balance, bittensor.Balance.from_tao(0))
+
+ if staking_balance > old_balance - stake_fee:
+ bittensor.__console__.print(":cross_mark: [red]Not enough balance[/red]: [green]{}[/green] to stake: [blue]{}[/blue] from coldkey: [white]{}[/white]".format(old_balance, staking_balance, wallet.name))
+ continue
+
+ # Ask before moving on.
+ if prompt:
+ if not Confirm.ask("Do you want to stake:\n[bold white] amount: {}\n hotkey: {}\n fee: {}[/bold white ]?".format( staking_balance, wallet.hotkey_str, stake_fee) ):
+ continue
+
+ with bittensor.__console__.status(":satellite: Staking to chain: [white]{}[/white] ...".format(self.network)):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='add_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_staked': staking_balance.rao
+ }
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.coldkey )
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Sent[/green]")
+ old_balance -= staking_balance + stake_fee
+ successful_stakes += 1
+ if staking_all:
+ # If staked all, no need to continue
+ break
+
+ continue
+
+ response.process_events()
+ if response.is_success:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Finalized[/green]")
+ else:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(response.error_message))
+
+ if response.is_success:
+ block = self.get_current_block()
+ new_stake = bittensor.Balance.from_tao( self.neuron_for_uid( uid = neuron.uid, block = block ).stake)
+ new_balance = self.get_balance( wallet.coldkey.ss58_address )
+ bittensor.__console__.print("Stake ({}): [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( neuron.uid, neuron.stake, new_stake ))
+ old_balance = new_balance
+ successful_stakes += 1
+ if staking_all:
+ # If staked all, no need to continue
+ break
+
+ if successful_stakes != 0:
+            with bittensor.__console__.status(":satellite: Checking Balance on: [white]{}[/white] ...".format(self.network)):
+ new_balance = self.get_balance( wallet.coldkey.ss58_address )
+ bittensor.__console__.print("Balance: [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( old_balance, new_balance ))
+ return True
+
+ return False
+
+ def transfer(
+ self,
+ wallet: 'bittensor.wallet',
+ dest: str,
+ amount: Union[Balance, float],
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ prompt: bool = False,
+ ) -> bool:
+ r""" Transfers funds from this wallet to the destination public key address
+ Args:
+ wallet (bittensor.wallet):
+ Bittensor wallet object to make transfer from.
+            dest (str, ss58_address or ed25519):
+                Destination public key address of the receiver.
+            amount (Union[Balance, float]):
+                Amount to transfer as bittensor balance, or float interpreted as Tao.
+ wait_for_inclusion (bool):
+ If set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ If set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+                Flag is true if extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
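+        # Hedged usage sketch (destination address is a placeholder): transfer 0.5 Tao and
+        # wait for inclusion so the balance check below reflects the new state.
+        #   subtensor.transfer( wallet = wallet, dest = '5F...destination', amount = 0.5, wait_for_inclusion = True )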
+ # Validate destination address.
+ if not is_valid_bittensor_address_or_public_key( dest ):
+ bittensor.__console__.print(":cross_mark: [red]Invalid destination address[/red]:[bold white]\n {}[/bold white]".format(dest))
+ return False
+
+ if isinstance( dest, bytes):
+ # Convert bytes to hex string.
+ dest = "0x" + dest.hex()
+
+ # Unlock wallet coldkey.
+ wallet.coldkey
+
+ # Convert to bittensor.Balance
+ if not isinstance(amount, bittensor.Balance ):
+ transfer_balance = bittensor.Balance.from_tao( amount )
+ else:
+ transfer_balance = amount
+
+ # Check balance.
+ with bittensor.__console__.status(":satellite: Checking Balance..."):
+ account_balance = self.get_balance( wallet.coldkey.ss58_address )
+
+ # Estimate transfer fee.
+ with bittensor.__console__.status(":satellite: Estimating Transfer Fees..."):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='Balances',
+ call_function='transfer',
+ call_params={
+ 'dest': dest,
+ 'value': transfer_balance.rao
+ }
+ )
+ payment_info = substrate.get_payment_info(call = call, keypair = wallet.coldkey)
+ transfer_fee = "N/A"
+ if payment_info:
+ transfer_fee = bittensor.Balance.from_rao(payment_info['partialFee'])
+ bittensor.__console__.print("[green]Estimated Fee: {}[/green]".format( transfer_fee ))
+ else:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: could not estimate transfer fee, assuming base fee of 0.2")
+ transfer_fee = bittensor.Balance.from_tao( 0.2 )
+
+ if account_balance < transfer_balance + transfer_fee:
+ bittensor.__console__.print(":cross_mark: [red]Not enough balance[/red]:[bold white]\n balance: {}\n amount: {} fee: {}[/bold white]".format( account_balance, transfer_balance, transfer_fee ))
+ return False
+
+ # Ask before moving on.
+ if prompt:
+ if not Confirm.ask("Do you want to transfer:[bold white]\n amount: {}\n from: {}:{}\n to: {}\n for fee: {}[/bold white]".format( transfer_balance, wallet.name, wallet.coldkey.ss58_address, dest, transfer_fee )):
+ return False
+
+ with bittensor.__console__.status(":satellite: Transferring..."):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='Balances',
+ call_function='transfer',
+ call_params={
+ 'dest': dest,
+ 'value': transfer_balance.rao
+ }
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.coldkey )
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Sent[/green]")
+ return True
+
+ # Otherwise continue with finalization.
+ response.process_events()
+ if response.is_success:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Finalized[/green]")
+ block_hash = response.block_hash
+ bittensor.__console__.print("[green]Block Hash: {}[/green]".format( block_hash ))
+ explorer_url = "https://explorer.nakamoto.opentensor.ai/#/explorer/query/{block_hash}".format( block_hash = block_hash )
+ bittensor.__console__.print("[green]Explorer Link: {}[/green]".format( explorer_url ))
+ else:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(response.error_message))
+
+ if response.is_success:
+ with bittensor.__console__.status(":satellite: Checking Balance..."):
+ new_balance = self.get_balance( wallet.coldkey.ss58_address )
+ bittensor.__console__.print("Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format(account_balance, new_balance))
+ return True
+
+ return False
+
+ def unstake (
+ self,
+ wallet: 'bittensor.wallet',
+ amount: Union[Balance, float] = None,
+ wait_for_inclusion:bool = True,
+ wait_for_finalization:bool = False,
+ prompt: bool = False,
+ ) -> bool:
+        r""" Removes stake from the specified hotkey and returns it to the wallet coldkey balance.
+ Args:
+ wallet (bittensor.wallet):
+ bittensor wallet object.
+ amount (Union[Balance, float]):
+ Amount to stake as bittensor balance, or float interpreted as tao.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+                flag is true if extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
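+        # Hedged usage sketch (illustrative amount): amount = None withdraws the hotkey's
+        # entire stake back to the coldkey balance.
+        #   subtensor.unstake( wallet = wallet, amount = 0.25, prompt = True )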
+ # Decrypt keys,
+ wallet.coldkey
+ wallet.hotkey
+
+ with bittensor.__console__.status(":satellite: Syncing with chain: [white]{}[/white] ...".format(self.network)):
+ old_balance = self.get_balance( wallet.coldkey.ss58_address )
+ neuron = self.neuron_for_pubkey( ss58_hotkey = wallet.hotkey.ss58_address )
+ if neuron.is_null:
+ bittensor.__console__.print(":cross_mark: [red]Hotkey: {} is not registered.[/red]".format( wallet.hotkey_str ))
+ return False
+
+        # Convert to bittensor.Balance
+ if amount == None:
+ # Unstake it all.
+ unstaking_balance = bittensor.Balance.from_tao( neuron.stake )
+ elif not isinstance(amount, bittensor.Balance ):
+ unstaking_balance = bittensor.Balance.from_tao( amount )
+ else:
+ unstaking_balance = amount
+
+ # Check enough to unstake.
+ stake_on_uid = bittensor.Balance.from_tao( neuron.stake )
+ if unstaking_balance > stake_on_uid:
+ bittensor.__console__.print(":cross_mark: [red]Not enough stake[/red]: [green]{}[/green] to unstake: [blue]{}[/blue] from hotkey: [white]{}[/white]".format(stake_on_uid, unstaking_balance, wallet.hotkey_str))
+ return False
+
+ # Estimate unstaking fee.
+ unstake_fee = None # To be filled.
+ with bittensor.__console__.status(":satellite: Estimating Staking Fees..."):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='remove_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_unstaked': unstaking_balance.rao
+ }
+ )
+ payment_info = substrate.get_payment_info(call = call, keypair = wallet.coldkey)
+ if payment_info:
+ unstake_fee = bittensor.Balance.from_rao(payment_info['partialFee'])
+ bittensor.__console__.print("[green]Estimated Fee: {}[/green]".format( unstake_fee ))
+ else:
+ unstake_fee = bittensor.Balance.from_tao( 0.2 )
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: could not estimate staking fee, assuming base fee of 0.2")
+
+ # Ask before moving on.
+ if prompt:
+ if not Confirm.ask("Do you want to unstake:\n[bold white] amount: {}\n hotkey: {}\n fee: {}[/bold white ]?".format( unstaking_balance, wallet.hotkey_str, unstake_fee) ):
+ return False
+
+ with bittensor.__console__.status(":satellite: Unstaking from chain: [white]{}[/white] ...".format(self.network)):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='remove_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_unstaked': unstaking_balance.rao
+ }
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.coldkey )
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Sent[/green]")
+ return True
+
+ response.process_events()
+ if response.is_success:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Finalized[/green]")
+ else:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(response.error_message))
+
+ if response.is_success:
+            with bittensor.__console__.status(":satellite: Checking Balance on: [white]{}[/white] ...".format(self.network)):
+ new_balance = self.get_balance( wallet.coldkey.ss58_address )
+ block = self.get_current_block()
+ new_stake = bittensor.Balance.from_tao( self.neuron_for_uid( uid = neuron.uid, block = block ).stake)
+ bittensor.__console__.print("Balance: [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( old_balance, new_balance ))
+ bittensor.__console__.print("Stake: [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( stake_on_uid, new_stake ))
+ return True
+
+ return False
+
+ def unstake_multiple (
+ self,
+ wallets: List['bittensor.wallet'],
+ amounts: List[Union[Balance, float]] = None,
+ wait_for_inclusion: bool = True,
+ wait_for_finalization: bool = False,
+ prompt: bool = False,
+ ) -> bool:
+ r""" Removes stake from each wallet hotkey in the list, using each amount, to their common coldkey.
+ Args:
+ wallets (List[bittensor.wallet]):
+ List of wallets to unstake.
+ amounts (List[Union[Balance, float]]):
+ List of amounts to unstake. If None, unstake all.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+ flag is true if extrinsic was finalized or included in the block.
+ flag is true if any wallet was unstaked.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
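+        # Hedged usage sketch (illustrative wallets): amounts = None unstakes everything from
+        # each hotkey; otherwise amounts pair positionally with wallets.
+        #   subtensor.unstake_multiple( wallets = hot_wallets, amounts = None )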
+ if not isinstance(wallets, list):
+ raise TypeError("wallets must be a list of bittensor.wallet")
+
+ if len(wallets) == 0:
+ return True
+
+ if amounts is not None and len(amounts) != len(wallets):
+ raise ValueError("amounts must be a list of the same length as wallets")
+
+ if amounts is not None and not all(isinstance(amount, (Balance, float)) for amount in amounts):
+ raise TypeError("amounts must be a [list of bittensor.Balance or float] or None")
+
+ if amounts is None:
+ amounts = [None] * len(wallets)
+ else:
+ # Convert to Balance
+ amounts = [bittensor.Balance.from_tao(amount) if isinstance(amount, float) else amount for amount in amounts ]
+
+ if sum(amount.tao for amount in amounts) == 0:
+                # Unstaking 0 tao
+ return True
+
+
+ wallet_0: 'bittensor.wallet' = wallets[0]
+ # Decrypt coldkey for all wallet(s) to use
+ wallet_0.coldkey
+
+ neurons = []
+ with bittensor.__console__.status(":satellite: Syncing with chain: [white]{}[/white] ...".format(self.network)):
+ old_balance = self.get_balance( wallet_0.coldkey.ss58_address )
+
+ for wallet in wallets:
+ neuron = self.neuron_for_pubkey( ss58_hotkey = wallet.hotkey.ss58_address )
+
+ if neuron.is_null:
+ neurons.append( None )
+ continue
+
+ neurons.append( neuron )
+
+ successful_unstakes = 0
+ for wallet, amount, neuron in zip(wallets, amounts, neurons):
+ if neuron is None:
+ bittensor.__console__.print(":cross_mark: [red]Hotkey: {} is not registered. Skipping ...[/red]".format( wallet.hotkey_str ))
+ continue
+
+ if wallet.coldkeypub.ss58_address != wallet_0.coldkeypub.ss58_address:
+ bittensor.__console__.print(":cross_mark: [red]Hotkey: {} is not under the same coldkey. Skipping ...[/red]".format( wallet.hotkey_str ))
+ continue
+
+ # Assign decrypted coldkey from wallet_0
+ # so we don't have to decrypt again
+ wallet._coldkey = wallet_0._coldkey
+
+            # Convert to bittensor.Balance
+ if amount == None:
+ # Unstake it all.
+ unstaking_balance = bittensor.Balance.from_tao( neuron.stake )
+ elif not isinstance(amount, bittensor.Balance ):
+ unstaking_balance = bittensor.Balance.from_tao( amount )
+ else:
+ unstaking_balance = amount
+
+ # Check enough to unstake.
+ stake_on_uid = bittensor.Balance.from_tao( neuron.stake )
+ if unstaking_balance > stake_on_uid:
+ bittensor.__console__.print(":cross_mark: [red]Not enough stake[/red]: [green]{}[/green] to unstake: [blue]{}[/blue] from hotkey: [white]{}[/white]".format(stake_on_uid, unstaking_balance, wallet.hotkey_str))
+ continue
+
+ # Estimate unstaking fee.
+ unstake_fee = None # To be filled.
+ with bittensor.__console__.status(":satellite: Estimating Staking Fees..."):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='remove_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_unstaked': unstaking_balance.rao
+ }
+ )
+ payment_info = substrate.get_payment_info(call = call, keypair = wallet.coldkey)
+ if payment_info:
+ unstake_fee = bittensor.Balance.from_rao(payment_info['partialFee'])
+ bittensor.__console__.print("[green]Estimated Fee: {}[/green]".format( unstake_fee ))
+ else:
+ unstake_fee = bittensor.Balance.from_tao( 0.2 )
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: could not estimate staking fee, assuming base fee of 0.2")
+
+ # Ask before moving on.
+ if prompt:
+ if not Confirm.ask("Do you want to unstake:\n[bold white] amount: {}\n hotkey: {}\n fee: {}[/bold white ]?".format( unstaking_balance, wallet.hotkey_str, unstake_fee) ):
+ continue
+
+ with bittensor.__console__.status(":satellite: Unstaking from chain: [white]{}[/white] ...".format(self.network)):
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='remove_stake',
+ call_params={
+ 'hotkey': wallet.hotkey.ss58_address,
+ 'ammount_unstaked': unstaking_balance.rao
+ }
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.coldkey )
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Sent[/green]")
+ successful_unstakes += 1
+ continue
+
+ response.process_events()
+ if response.is_success:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Finalized[/green]")
+ else:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(response.error_message))
+
+ if response.is_success:
+ block = self.get_current_block()
+ new_stake = bittensor.Balance.from_tao( self.neuron_for_uid( uid = neuron.uid, block = block ).stake)
+ bittensor.__console__.print("Stake ({}): [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( neuron.uid, stake_on_uid, new_stake ))
+ successful_unstakes += 1
+
+ if successful_unstakes != 0:
+            with bittensor.__console__.status(":satellite: Checking Balance on: [white]{}[/white] ...".format(self.network)):
+ new_balance = self.get_balance( wallet.coldkey.ss58_address )
+ bittensor.__console__.print("Balance: [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( old_balance, new_balance ))
+ return True
+
+ return False
+
+ def set_weights(
+ self,
+ wallet: 'bittensor.wallet',
+ uids: Union[torch.LongTensor, list],
+ weights: Union[torch.FloatTensor, list],
+ wait_for_inclusion:bool = False,
+ wait_for_finalization:bool = False,
+ prompt:bool = False,
+ netuid:int = None
+ ) -> bool:
+ r""" Sets the given weights and values on chain for wallet hotkey account.
+ Args:
+ wallet (bittensor.wallet):
+ bittensor wallet object.
+ uids (Union[torch.LongTensor, list]):
+ uint64 uids of destination neurons.
+ weights ( Union[torch.FloatTensor, list]):
+                weights to set, which must be floats and correspond to the passed uids.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+                flag is true if extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
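+        # Hedged usage sketch (illustrative uids/weights): plain lists are converted to tensors
+        # below and normalized by weight_utils.convert_weights_and_uids_for_emit.
+        #   subtensor.set_weights( wallet = wallet, uids = [0, 1, 2], weights = [0.2, 0.3, 0.5] )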
+ # First convert types.
+ if isinstance( uids, list ):
+ uids = torch.tensor( uids, dtype = torch.int64 )
+ if isinstance( weights, list ):
+ weights = torch.tensor( weights, dtype = torch.float32 )
+
+ # Reformat and normalize.
+ weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( uids, weights )
+
+ # Ask before moving on.
+ if prompt:
+ if not Confirm.ask("Do you want to set weights:\n[bold white] weights: {}\n uids: {}[/bold white ]?".format( [float(v/4294967295) for v in weight_vals], weight_uids) ):
+ return False
+
+ with bittensor.__console__.status(":satellite: Setting weights on [white]{}[/white] ...".format(self.network)):
+ try:
+ with self.substrate as substrate:
+ call = substrate.compose_call(
+ call_module='SubtensorModule',
+ call_function='set_weights',
+ call_params = {'dests': weight_uids, 'weights': weight_vals}
+ )
+ extrinsic = substrate.create_signed_extrinsic( call = call, keypair = wallet.hotkey )
+ response = substrate.submit_extrinsic( extrinsic, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization )
+ # We only wait here if we expect finalization.
+ if not wait_for_finalization and not wait_for_inclusion:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Sent[/green]")
+ return True
+
+ response.process_events()
+ if response.is_success:
+ bittensor.__console__.print(":white_heavy_check_mark: [green]Finalized[/green]")
+ bittensor.logging.success( prefix = 'Set weights', sufix = 'Finalized: ' + str(response.is_success) )
+ else:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(response.error_message))
+ bittensor.logging.warning( prefix = 'Set weights', sufix = 'Failed: ' + str(response.error_message) )
+
+ except Exception as e:
+ bittensor.__console__.print(":cross_mark: [red]Failed[/red]: error:{}".format(e))
+ bittensor.logging.warning( prefix = 'Set weights', sufix = 'Failed: ' + str(e) )
+ return False
+
+ if response.is_success:
+ bittensor.__console__.print("Set weights:\n[bold white] weights: {}\n uids: {}[/bold white ]".format( [float(v/4294967295) for v in weight_vals], weight_uids ))
+ message = 'Success: ' + f'Set {len(uids)} weights, top 5 weights' + str(list(zip(uids.tolist()[:5], [round (w,4) for w in weights.tolist()[:5]] )))
+ logger.debug('Set weights:'.ljust(20) + message)
+ return True
+
+ return False
+
+ def get_balance(self, address: str, block: int = None) -> Balance:
+        r""" Returns the token balance for the passed ss58 address.
+        Args:
+            address (str):
+                ss58 chain address (Substrate ss58 format, default prefix 42).
+ Return:
+ balance (bittensor.utils.balance.Balance):
+ account balance
+ """
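+        # Hedged usage sketch: read a coldkey balance at the chain head, or pin a block
+        # number (illustrative) for a historical read.
+        #   balance = subtensor.get_balance( wallet.coldkeypub.ss58_address )
+        #   old_balance = subtensor.get_balance( wallet.coldkeypub.ss58_address, block = 1000000 )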
+ try:
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query(
+ module='System',
+ storage_function='Account',
+ params=[address],
+ block_hash = None if block == None else substrate.get_block_hash( block )
+ )
+ result = make_substrate_call_with_retry()
+ except scalecodec.exceptions.RemainingScaleBytesNotEmptyException:
+            logger.critical("Your wallet is legacy formatted; you need to run btcli stake --amount 0 to reformat it.")
+ return Balance(1000)
+ return Balance( result.value['data']['free'] )
+
+ def get_current_block(self) -> int:
+ r""" Returns the current block number on the chain.
+ Returns:
+ block_number (int):
+ Current chain blocknumber.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.get_block_number(None)
+ return make_substrate_call_with_retry()
+
+ def get_balances(self, block: int = None) -> Dict[str, Balance]:
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query_map(
+ module='System',
+ storage_function='Account',
+ block_hash = None if block == None else substrate.get_block_hash( block )
+ )
+ result = make_substrate_call_with_retry()
+ return_dict = {}
+ for r in result:
+ bal = bittensor.Balance( int( r[1]['data']['free'].value ) )
+ return_dict[r[0].value] = bal
+ return return_dict
+
+ def neurons(self, block: int = None ) -> List[SimpleNamespace]:
+        r""" Returns a list of neurons from the chain.
+ Args:
+ block (int):
+ block to sync from.
+ Returns:
+ neuron (List[SimpleNamespace]):
+ List of neuron objects.
+ """
+ neurons = []
+ for id in tqdm(range(self.get_n( block ))):
+ try:
+ neuron = self.neuron_for_uid(id, block)
+ neurons.append( neuron )
+ except Exception as e:
+ logger.error('Exception encountered when pulling neuron {}: {}'.format(id, e))
+ break
+ return neurons
+
+ @staticmethod
+ def _null_neuron() -> SimpleNamespace:
+ neuron = SimpleNamespace()
+ neuron.active = 0
+ neuron.stake = 0
+ neuron.rank = 0
+ neuron.trust = 0
+ neuron.consensus = 0
+ neuron.incentive = 0
+ neuron.dividends = 0
+ neuron.emission = 0
+ neuron.weights = []
+ neuron.bonds = []
+ neuron.version = 0
+ neuron.modality = 0
+ neuron.uid = 0
+ neuron.port = 0
+ neuron.priority = 0
+ neuron.ip_type = 0
+ neuron.last_update = 0
+ neuron.ip = 0
+ neuron.is_null = True
+ neuron.coldkey = "000000000000000000000000000000000000000000000000"
+ neuron.hotkey = "000000000000000000000000000000000000000000000000"
+ return neuron
+
+ @staticmethod
+ def _neuron_dict_to_namespace(neuron_dict) -> SimpleNamespace:
+ if neuron_dict['hotkey'] == '5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM':
+ return Subtensor._null_neuron()
+ else:
+ RAOPERTAO = 1000000000
+ U64MAX = 18446744073709551615
+ neuron = SimpleNamespace( **neuron_dict )
+ neuron.stake = neuron.stake / RAOPERTAO
+ neuron.rank = neuron.rank / U64MAX
+ neuron.trust = neuron.trust / U64MAX
+ neuron.consensus = neuron.consensus / U64MAX
+ neuron.incentive = neuron.incentive / U64MAX
+ neuron.dividends = neuron.dividends / U64MAX
+ neuron.emission = neuron.emission / RAOPERTAO
+ neuron.is_null = False
+ return neuron
+
+ def neuron_for_uid( self, uid: int, block: int = None, netuid: int = None ) -> Union[ dict, None ]:
+        r""" Returns the neuron metadata associated with the passed uid.
+ Args:
+ uid ( int ):
+ The uid of the neuron to query for.
+ block ( int ):
+ The neuron at a particular block
+ Returns:
+ neuron (dict(NeuronMetadata)):
+ neuron object associated with uid or None if it does not exist.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ result = dict( substrate.query(
+ module='SubtensorModule',
+ storage_function='Neurons',
+ params = [ uid ],
+ block_hash = None if block == None else substrate.get_block_hash( block )
+ ).value )
+ return result
+ result = make_substrate_call_with_retry()
+ neuron = Subtensor._neuron_dict_to_namespace( result )
+ return neuron
+
+ def get_uid_for_hotkey( self, ss58_hotkey: str, block: int = None, netuid: int = None ) -> int:
+        r""" Returns the uid of the passed hotkey, or -1 if it is not registered on the chain.
+ Args:
+ ss58_hotkey ( str ):
+ The hotkey to query for a neuron.
+ Returns:
+ uid ( int ):
+ UID of passed hotkey or -1 if it is non-existent.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query (
+ module='SubtensorModule',
+ storage_function='Hotkeys',
+ params = [ ss58_hotkey ],
+ block_hash = None if block == None else substrate.get_block_hash( block )
+ )
+ result = make_substrate_call_with_retry()
+ # Process the result.
+ uid = int(result.value)
+
+ neuron = self.neuron_for_uid( uid, block )
+ if neuron.hotkey != ss58_hotkey:
+ return -1
+ else:
+ return uid
+
+
+ def is_hotkey_registered( self, ss58_hotkey: str, block: int = None) -> bool:
+ r""" Returns true if the passed hotkey is registered on the chain.
+ Args:
+ ss58_hotkey ( str ):
+ The hotkey to query for a neuron.
+ Returns:
+ is_registered ( bool):
+ True if the passed hotkey is registered on the chain.
+ """
+ uid = self.get_uid_for_hotkey( ss58_hotkey = ss58_hotkey, block = block)
+ if uid == -1:
+ return False
+ else:
+ return True
+
+ def neuron_for_pubkey( self, ss58_hotkey: str, block: int = None ) -> SimpleNamespace:
+        r""" Returns the neuron metadata associated with the passed hotkey.
+ Args:
+ ss58_hotkey ( str ):
+ The hotkey to query for a neuron.
+ Returns:
+ neuron ( dict(NeuronMetadata) ):
+ neuron object associated with uid or None if it does not exist.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return substrate.query (
+ module='SubtensorModule',
+ storage_function='Hotkeys',
+ params = [ ss58_hotkey ],
+ block_hash = None if block == None else substrate.get_block_hash( block )
+ )
+ result = make_substrate_call_with_retry()
+ # Get response uid. This will be zero if it doesn't exist.
+ uid = int(result.value)
+ neuron = self.neuron_for_uid( uid, block )
+ if neuron.hotkey != ss58_hotkey:
+ return Subtensor._null_neuron()
+ else:
+ return neuron
+
+ def get_n( self, block: int = None ) -> int:
+ r""" Returns the number of neurons on the chain at block.
+ Args:
+ block ( int ):
+ The block number to get the neuron count from.
+ Returns:
+ n ( int ):
+ the number of neurons subscribed to the chain.
+ """
+ @retry(delay=2, tries=3, backoff=2, max_delay=4)
+ def make_substrate_call_with_retry():
+ with self.substrate as substrate:
+ return int(substrate.query( module='SubtensorModule', storage_function = 'N', block_hash = None if block == None else substrate.get_block_hash( block ) ).value)
+ return make_substrate_call_with_retry()
+
+ def neuron_for_wallet( self, wallet: 'bittensor.Wallet', block: int = None ) -> SimpleNamespace:
+        r""" Returns the neuron metadata associated with the passed wallet's hotkey.
+        Args:
+            wallet ( `bittensor.Wallet` ):
+                Wallet whose hotkey neuron is queried from the chain.
+ Returns:
+ neuron ( dict(NeuronMetadata) ):
+ neuron object associated with uid or None if it does not exist.
+ """
+ return self.neuron_for_pubkey ( wallet.hotkey.ss58_address, block = block )
diff --git a/bittensor/_wallet/__init__.py b/bittensor/_wallet/__init__.py
index 69de88e398..69bc11e4d1 100644
--- a/bittensor/_wallet/__init__.py
+++ b/bittensor/_wallet/__init__.py
@@ -26,7 +26,7 @@
from bittensor.utils import strtobool
from . import wallet_impl, wallet_mock
-
+from .naka_wallet_impl import Wallet as naka_wallet
class wallet:
""" Create and init wallet that stores hot and coldkey
@@ -78,12 +78,21 @@ def __new__(
config = config
)
- return wallet_impl.Wallet(
- name = config.wallet.get('name', bittensor.defaults.wallet.name),
- hotkey = config.wallet.get('hotkey', bittensor.defaults.wallet.hotkey),
- path = config.wallet.path,
- config = config
- )
+        network = config.subtensor.get('network', bittensor.defaults.subtensor.network) if 'subtensor' in config else bittensor.defaults.subtensor.network
+ if network == 'finney':
+ return wallet_impl.Wallet(
+ name = config.wallet.get('name', bittensor.defaults.wallet.name),
+ hotkey = config.wallet.get('hotkey', bittensor.defaults.wallet.hotkey),
+ path = config.wallet.path,
+ config = config
+ )
+ elif network == 'nakamoto':
+ return naka_wallet(
+ name = config.wallet.get('name', bittensor.defaults.wallet.name),
+ hotkey = config.wallet.get('hotkey', bittensor.defaults.wallet.hotkey),
+ path = config.wallet.path,
+ config = config
+ )
@classmethod
def config(cls) -> 'bittensor.Config':
diff --git a/bittensor/_wallet/naka_wallet_impl.py b/bittensor/_wallet/naka_wallet_impl.py
new file mode 100644
index 0000000000..3cd6c2c088
--- /dev/null
+++ b/bittensor/_wallet/naka_wallet_impl.py
@@ -0,0 +1,760 @@
+""" Implementation of the wallet class, which manages balances with staking and transfer. Also manages hotkey and coldkey.
+"""
+# The MIT License (MIT)
+# Copyright © 2021 Yuma Rao
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software.
+
+# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+import os
+import sys
+from types import SimpleNamespace
+from typing import Optional, Union
+
+import bittensor
+from bittensor.utils import is_valid_bittensor_address_or_public_key
+from substrateinterface import Keypair
+from termcolor import colored
+
+
+def display_mnemonic_msg( keypair : Keypair, key_type : str ):
+ """ Displaying the mnemonic and warning message to keep mnemonic safe
+ """
+ mnemonic = keypair.mnemonic
+ mnemonic_green = colored(mnemonic, 'green')
+    print (colored("\nIMPORTANT: Store this mnemonic in a secure (preferably offline) place, as anyone " \
+        "who has possession of this mnemonic can use it to regenerate the key and access your tokens. \n", "red"))
+ print ("The mnemonic to the new {} is:\n\n{}\n".format(key_type, mnemonic_green))
+ print ("You can use the mnemonic to recreate the key in case it gets lost. The command to use to regenerate the key using this mnemonic is:")
+ print("btcli regen_{} --mnemonic {}".format(key_type, mnemonic))
+ print('')
+
+class Wallet():
+ """
+ Bittensor wallet maintenance class. Each wallet contains a coldkey and a hotkey.
+ The coldkey is the user's primary key for holding stake in their wallet
+ and is the only way that users can access Tao. Coldkeys can hold tokens and should be encrypted on your device.
+ The coldkey must be used to stake and unstake funds from a running node. The hotkey, on the other hand, is only used
+    for subscribing and setting weights from running code. Hotkeys are linked to coldkeys through the metagraph.
+ """
+ def __init__(
+ self,
+ name:str,
+ path:str,
+ hotkey:str,
+ config: 'bittensor.Config' = None,
+ ):
+ r""" Init bittensor wallet object containing a hot and coldkey.
+ Args:
+            name (required=True, default='default'):
+                The name of the wallet to unlock for running bittensor.
+            hotkey (required=True, default='default'):
+                The name of the hotkey used to run the miner.
+            path (required=True, default='~/.bittensor/wallets/'):
+                The path to your bittensor wallets.
+ config (:obj:`bittensor.Config`, `optional`):
+ bittensor.wallet.config()
+ """
+ self.name = name
+ self.path = path
+ self.hotkey_str = hotkey
+ self._hotkey = None
+ self._coldkey = None
+ self._coldkeypub = None
+ self.config = config
+
+ def __str__(self):
+ return "Wallet ({}, {}, {})".format(self.name, self.hotkey_str, self.path)
+
+ def __repr__(self):
+ return self.__str__()
+
+ @property
+ def neuron(self) -> SimpleNamespace:
+ return self.get_neuron()
+
+ @property
+ def trust(self) -> SimpleNamespace:
+ return self.get_neuron().trust
+
+ @property
+ def rank(self) -> SimpleNamespace:
+ return self.get_neuron().rank
+
+ @property
+ def incentive(self) -> SimpleNamespace:
+ return self.get_neuron().incentive
+
+ @property
+ def dividends(self) -> SimpleNamespace:
+ return self.get_neuron().dividends
+
+ @property
+ def consensus(self) -> SimpleNamespace:
+ return self.get_neuron().consensus
+
+ @property
+ def inflation(self) -> SimpleNamespace:
+ return self.get_neuron().inflation
+
+ @property
+ def ip(self) -> SimpleNamespace:
+ return self.get_neuron().ip
+
+ @property
+ def last_update(self) -> SimpleNamespace:
+ return self.get_neuron().last_update
+
+ @property
+ def weights(self) -> SimpleNamespace:
+ return self.get_neuron().weights
+
+ @property
+ def bonds(self) -> SimpleNamespace:
+ return self.get_neuron().bonds
+
+ @property
+ def uid(self) -> SimpleNamespace:
+ return self.get_uid()
+
+ @property
+ def stake(self) -> SimpleNamespace:
+ return self.get_stake()
+
+ @property
+ def balance(self) -> SimpleNamespace:
+ return self.get_balance()
+
+ def is_registered( self, subtensor: 'bittensor.Subtensor' = None, netuid: int = None ) -> bool:
+ """ Returns true if this wallet is registered.
+ Args:
+ subtensor( 'bittensor.Subtensor' ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ Determines which network we check for registration.
+ Return:
+ is_registered (bool):
+ Is the wallet registered on the chain.
+ """
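+        # Hedged usage sketch: a default subtensor connection is created below when none is passed.
+        #   if not wallet.is_registered():
+        #       wallet.register()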
+ if subtensor == None: subtensor = bittensor.subtensor()
+ return subtensor.is_hotkey_registered( self.hotkey.ss58_address )
+
+ def get_neuron ( self, subtensor: 'bittensor.Subtensor' = None ) -> Union[ SimpleNamespace, None] :
+ """ Returns this wallet's neuron information from subtensor.
+ Args:
+ subtensor( 'bittensor.Subtensor' ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ Return:
+ neuron (Union[ SimpleNamespace, None ]):
+ neuron account on the chain or None if you are not registered.
+ """
+ if subtensor == None: subtensor = bittensor.subtensor()
+ if not self.is_registered(subtensor=subtensor):
+ print(colored('This wallet is not registered. Call wallet.register() before this function.','red'))
+ return None
+ neuron = subtensor.neuron_for_wallet( self )
+ return neuron
+
+ def get_uid ( self, subtensor: 'bittensor.Subtensor' = None, netuid: int = None ) -> int:
+ """ Returns this wallet's hotkey uid or -1 if the hotkey is not subscribed.
+ Args:
+ subtensor( 'bittensor.Subtensor' ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ Return:
+ uid (int):
+ Network uid or -1 if you are not registered.
+ """
+ if subtensor == None: subtensor = bittensor.subtensor()
+ if not self.is_registered(subtensor=subtensor):
+ print(colored('This wallet is not registered. Call wallet.register() before this function.','red'))
+ return -1
+ neuron = self.get_neuron(subtensor = subtensor)
+ if neuron.is_null:
+ return -1
+ else:
+ return neuron.uid
+
+ def get_stake ( self, subtensor: 'bittensor.Subtensor' = None ) -> 'bittensor.Balance':
+ """ Returns this wallet's staking balance from passed subtensor connection.
+ Args:
+ subtensor( 'bittensor.Subtensor' ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ Return:
+ balance (bittensor.utils.balance.Balance):
+ Stake account balance.
+ """
+ if subtensor == None: subtensor = bittensor.subtensor()
+ if not self.is_registered(subtensor=subtensor):
+ print(colored('This wallet is not registered. Call wallet.register() before this function.','red'))
+ return bittensor.Balance(0)
+ neuron = self.get_neuron(subtensor = subtensor)
+ if neuron.is_null:
+ return bittensor.Balance(0)
+ else:
+ return bittensor.Balance.from_tao(neuron.stake)
+
+ def get_balance( self, subtensor: 'bittensor.Subtensor' = None ) -> 'bittensor.Balance':
+ """ Returns this wallet's coldkey balance from passed subtensor connection.
+ Args:
+ subtensor( 'bittensor.Subtensor' ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ Return:
+ balance (bittensor.utils.balance.Balance):
+ Coldkey balance.
+ """
+ if subtensor == None: subtensor = bittensor.subtensor()
+ return subtensor.get_balance(address = self.coldkeypub.ss58_address)
+
+ def reregister(
+ self,
+ subtensor: 'bittensor.Subtensor' = None,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ prompt: bool = False,
+ netuid: int = None,
+ ) -> Optional['bittensor.Wallet']:
+ """ Re-register this wallet on the chain.
+ Args:
+ subtensor( 'bittensor.Subtensor' ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+
+ Return:
+ wallet (bittensor.Wallet):
+ This wallet.
+ """
+ if subtensor == None:
+ subtensor = bittensor.subtensor()
+ if not self.is_registered(subtensor=subtensor):
+ # Check if the wallet should reregister
+ if not self.config.wallet.get('reregister'):
+ sys.exit(0)
+
+ self.register(
+ subtensor = subtensor,
+ prompt = prompt,
+ TPB = self.config.subtensor.register.cuda.get('TPB', None),
+ update_interval = self.config.subtensor.register.cuda.get('update_interval', None),
+ num_processes = self.config.subtensor.register.get('num_processes', None),
+ cuda = self.config.subtensor.register.cuda.get('use_cuda', bittensor.defaults.subtensor.register.cuda.use_cuda),
+ dev_id = self.config.subtensor.register.cuda.get('dev_id', None),
+ wait_for_inclusion = wait_for_inclusion,
+ wait_for_finalization = wait_for_finalization,
+ output_in_place = self.config.subtensor.register.get('output_in_place', bittensor.defaults.subtensor.register.output_in_place),
+ log_verbose = self.config.subtensor.register.get('verbose', bittensor.defaults.subtensor.register.verbose),
+ )
+
+ return self
+
+ def register (
+ self,
+ subtensor: 'bittensor.Subtensor' = None,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ prompt: bool = False,
+ max_allowed_attempts: int = 3,
+ cuda: bool = False,
+ dev_id: int = 0,
+ TPB: int = 256,
+ num_processes: Optional[int] = None,
+ update_interval: Optional[int] = None,
+ output_in_place: bool = True,
+ log_verbose: bool = False,
+ netuid: int = None
+ ) -> 'bittensor.Wallet':
+ """ Registers the wallet to chain.
+ Args:
+ subtensor( 'bittensor.Subtensor' ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ wait_for_inclusion (bool):
+ If set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ If set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ max_allowed_attempts (int):
+ Maximum number of attempts to register the wallet.
+ cuda (bool):
+ If true, the wallet should be registered on the cuda device.
+ dev_id (int):
+ The cuda device id.
+ TPB (int):
+ The number of threads per block (cuda).
+ num_processes (int):
+ The number of processes to use to register.
+ update_interval (int):
+ The number of nonces to solve between updates.
+ output_in_place (bool):
+ If true, the registration output is printed in-place.
+ log_verbose (bool):
+ If true, the registration output is more verbose.
+ Returns:
+ wallet (bittensor.Wallet):
+ This wallet, returned after the registration attempt.
+ """
+ # Get chain connection.
+ if subtensor == None: subtensor = bittensor.subtensor()
+ subtensor.register(
+ wallet = self,
+ wait_for_inclusion = wait_for_inclusion,
+ wait_for_finalization = wait_for_finalization,
+ prompt=prompt, max_allowed_attempts=max_allowed_attempts,
+ output_in_place = output_in_place,
+ cuda=cuda,
+ dev_id=dev_id,
+ TPB=TPB,
+ num_processes=num_processes,
+ update_interval=update_interval,
+ log_verbose=log_verbose,
+ )
+
+ return self
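+
+ # Illustrative usage (not part of the original change): registering via CPU or CUDA PoW.
+ # The parameter values below are examples only, not recommended defaults.
+ #
+ #   wallet.register( subtensor = sub, num_processes = 4, update_interval = 50_000 )   # CPU solve
+ #   wallet.register( subtensor = sub, cuda = True, dev_id = 0, TPB = 256 )            # CUDA solve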
+
+ def add_stake( self,
+ amount: Union[float, bittensor.Balance] = None,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ subtensor: 'bittensor.Subtensor' = None,
+ prompt: bool = False
+ ) -> bool:
+ """ Stakes tokens from this wallet's coldkey onto it's hotkey.
+ Args:
+ amount_tao (float):
+ amount of tao to stake or bittensor balance object. If None, stakes all available balance.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ subtensor( `bittensor.Subtensor` ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+ flag is true if the extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
+ if subtensor == None: subtensor = bittensor.subtensor()
+ return subtensor.add_stake( wallet = self, amount = amount, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization, prompt=prompt )
+
+ def remove_stake( self,
+ amount: Union[float, bittensor.Balance] = None,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ subtensor: 'bittensor.Subtensor' = None,
+ prompt: bool = False,
+ ) -> bool:
+ """ Removes stake from this wallet's hotkey and moves them onto it's coldkey balance.
+ Args:
+ amount_tao (float):
+ amount of tao to unstake or bittensor balance object. If None, unstakes all available hotkey balance.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ subtensor( `bittensor.Subtensor` ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+ flag is true if the extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
+ if subtensor == None: subtensor = bittensor.subtensor()
+ return subtensor.unstake( wallet = self, amount = amount, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization, prompt=prompt )
+
+ def transfer(
+ self,
+ dest:str,
+ amount: Union[float, bittensor.Balance] ,
+ wait_for_inclusion: bool = False,
+ wait_for_finalization: bool = True,
+ subtensor: 'bittensor.Subtensor' = None,
+ prompt: bool = False,
+ ) -> bool:
+ """ Transfers Tao from this wallet's coldkey to the destination address.
+ Args:
+ dest (`type`:str, required):
+ The destination address, encoded either as an ss58 address or an ed25519 public-key string of the
+ secondary account.
+ amount (float, required):
+ amount of tao to transfer or a bittensor balance object.
+ wait_for_inclusion (bool):
+ if set, waits for the extrinsic to enter a block before returning true,
+ or returns false if the extrinsic fails to enter the block within the timeout.
+ wait_for_finalization (bool):
+ if set, waits for the extrinsic to be finalized on the chain before returning true,
+ or returns false if the extrinsic fails to be finalized within the timeout.
+ subtensor( `bittensor.Subtensor` ):
+ Bittensor subtensor connection. Overrides with defaults if None.
+ prompt (bool):
+ If true, the call waits for confirmation from the user before proceeding.
+ Returns:
+ success (bool):
+ flag is true if the extrinsic was finalized or included in the block.
+ If we did not wait for finalization / inclusion, the response is true.
+ """
+ if subtensor == None: subtensor = bittensor.subtensor()
+ return subtensor.transfer( wallet = self, dest = dest, amount = amount, wait_for_inclusion = wait_for_inclusion, wait_for_finalization = wait_for_finalization, prompt=prompt )
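+
+ # Illustrative usage (not part of the original change): moving TAO with this wallet.
+ # The destination address below is a placeholder, not a real account.
+ #
+ #   wallet.add_stake( amount = 1.0, wait_for_inclusion = True )
+ #   wallet.remove_stake( amount = bittensor.Balance.from_tao( 0.5 ) )
+ #   wallet.transfer( dest = '5F...', amount = 0.25, prompt = True )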
+
+ def create_if_non_existent( self, coldkey_use_password:bool = True, hotkey_use_password:bool = False) -> 'Wallet':
+ """ Checks for existing coldkeypub and hotkeys and creates them if non-existent.
+ """
+ return self.create(coldkey_use_password, hotkey_use_password)
+
+ def create (self, coldkey_use_password:bool = True, hotkey_use_password:bool = False ) -> 'Wallet':
+ """ Checks for existing coldkeypub and hotkeys and creates them if non-existent.
+ """
+ # ---- Setup Wallet. ----
+ if not self.coldkey_file.exists_on_device() and not self.coldkeypub_file.exists_on_device():
+ self.create_new_coldkey( n_words = 12, use_password = coldkey_use_password )
+ if not self.hotkey_file.exists_on_device():
+ self.create_new_hotkey( n_words = 12, use_password = hotkey_use_password )
+ return self
+
+ def recreate (self, coldkey_use_password:bool = True, hotkey_use_password:bool = False ) -> 'Wallet':
+ """ Checks for existing coldkeypub and hotkeys and creates them if non-existent.
+ """
+ # ---- Setup Wallet. ----
+ self.create_new_coldkey( n_words = 12, use_password = coldkey_use_password )
+ self.create_new_hotkey( n_words = 12, use_password = hotkey_use_password )
+ return self
+
+ @property
+ def hotkey_file(self) -> 'bittensor.Keyfile':
+
+ wallet_path = os.path.expanduser(os.path.join(self.path, self.name))
+ hotkey_path = os.path.join(wallet_path, "hotkeys", self.hotkey_str)
+ return bittensor.keyfile( path = hotkey_path )
+
+ @property
+ def coldkey_file(self) -> 'bittensor.Keyfile':
+ wallet_path = os.path.expanduser(os.path.join(self.path, self.name))
+ coldkey_path = os.path.join(wallet_path, "coldkey")
+ return bittensor.keyfile( path = coldkey_path )
+
+ @property
+ def coldkeypub_file(self) -> 'bittensor.Keyfile':
+ wallet_path = os.path.expanduser(os.path.join(self.path, self.name))
+ coldkeypub_path = os.path.join(wallet_path, "coldkeypub.txt")
+ return bittensor.keyfile( path = coldkeypub_path )
+
+ def set_hotkey(self, keypair: 'bittensor.Keypair', encrypt: bool = False, overwrite: bool = False) -> 'bittensor.Keyfile':
+ self._hotkey = keypair
+ self.hotkey_file.set_keypair( keypair, encrypt = encrypt, overwrite = overwrite )
+
+ def set_coldkeypub(self, keypair: 'bittensor.Keypair', encrypt: bool = False, overwrite: bool = False) -> 'bittensor.Keyfile':
+ self._coldkeypub = Keypair(ss58_address=keypair.ss58_address)
+ self.coldkeypub_file.set_keypair( self._coldkeypub, encrypt = encrypt, overwrite = overwrite )
+
+ def set_coldkey(self, keypair: 'bittensor.Keypair', encrypt: bool = True, overwrite: bool = False) -> 'bittensor.Keyfile':
+ self._coldkey = keypair
+ self.coldkey_file.set_keypair( self._coldkey, encrypt = encrypt, overwrite = overwrite )
+
+ def get_coldkey(self, password: str = None ) -> 'bittensor.Keypair':
+ return self.coldkey_file.get_keypair( password = password )
+
+ def get_hotkey(self, password: str = None ) -> 'bittensor.Keypair':
+ return self.hotkey_file.get_keypair( password = password )
+
+ def get_coldkeypub(self, password: str = None ) -> 'bittensor.Keypair':
+ return self.coldkeypub_file.get_keypair( password = password )
+
+ @property
+ def hotkey(self) -> 'bittensor.Keypair':
+ r""" Loads the hotkey from wallet.path/wallet.name/hotkeys/wallet.hotkey or raises an error.
+ Returns:
+ hotkey (Keypair):
+ hotkey loaded from config arguments.
+ Raises:
+ KeyFileError: Raised if the file is corrupt or non-existent.
+ CryptoKeyError: Raised if the user enters an incorrect password for an encrypted keyfile.
+ """
+ if self._hotkey == None:
+ self._hotkey = self.hotkey_file.keypair
+ return self._hotkey
+
+ @property
+ def coldkey(self) -> 'bittensor.Keypair':
+ r""" Loads the hotkey from wallet.path/wallet.name/coldkey or raises an error.
+ Returns:
+ coldkey (Keypair):
+ colkey loaded from config arguments.
+ Raises:
+ KeyFileError: Raised if the file is corrupt of non-existent.
+ CryptoKeyError: Raised if the user enters an incorrec password for an encrypted keyfile.
+ """
+ if self._coldkey == None:
+ self._coldkey = self.coldkey_file.keypair
+ return self._coldkey
+
+ @property
+ def coldkeypub(self) -> 'bittensor.Keypair':
+ r""" Loads the coldkeypub from wallet.path/wallet.name/coldkeypub.txt or raises an error.
+ Returns:
+ coldkeypub (Keypair):
+ coldkeypub loaded from config arguments.
+ Raises:
+ KeyFileError: Raised if the file is corrupt or non-existent.
+ CryptoKeyError: Raised if the user enters an incorrect password for an encrypted keyfile.
+ """
+ if self._coldkeypub == None:
+ self._coldkeypub = self.coldkeypub_file.keypair
+ return self._coldkeypub
+
+ def create_coldkey_from_uri(self, uri:str, use_password: bool = True, overwrite:bool = False) -> 'Wallet':
+ """ Creates coldkey from suri string, optionally encrypts it with the user's inputed password.
+ Args:
+ uri: (str, required):
+ URI string to use i.e. /Alice or /Bob
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the coldkey under the same path <wallet path>/coldkey
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created coldkey.
+ """
+ keypair = Keypair.create_from_uri( uri )
+ display_mnemonic_msg( keypair, "coldkey" )
+ self.set_coldkey( keypair, encrypt = use_password, overwrite = overwrite)
+ self.set_coldkeypub( keypair, overwrite = overwrite)
+ return self
+
+ def create_hotkey_from_uri( self, uri:str, use_password: bool = False, overwrite:bool = False) -> 'Wallet':
+ """ Creates hotkey from suri string, optionally encrypts it with the user's inputed password.
+ Args:
+ uri: (str, required):
+ URI string to use i.e. /Alice or /Bob
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the hotkey under the same path <wallet path>/hotkeys/<hotkey>
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created hotkey.
+ """
+ keypair = Keypair.create_from_uri( uri )
+ display_mnemonic_msg( keypair, "hotkey" )
+ self.set_hotkey( keypair, encrypt=use_password, overwrite = overwrite)
+ return self
+
+ def new_coldkey( self, n_words:int = 12, use_password: bool = True, overwrite:bool = False) -> 'Wallet':
+ """ Creates a new coldkey, optionally encrypts it with the user's inputed password and saves to disk.
+ Args:
+ n_words: (int, optional):
+ Number of mnemonic words to use.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the coldkey under the same path <wallet path>/coldkey
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created coldkey.
+ """
+ return self.create_new_coldkey( n_words, use_password, overwrite )
+
+ def create_new_coldkey( self, n_words:int = 12, use_password: bool = True, overwrite:bool = False) -> 'Wallet':
+ """ Creates a new coldkey, optionally encrypts it with the user's inputed password and saves to disk.
+ Args:
+ n_words: (int, optional):
+ Number of mnemonic words to use.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the coldkey under the same path <wallet path>/coldkey
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created coldkey.
+ """
+ mnemonic = Keypair.generate_mnemonic( n_words)
+ keypair = Keypair.create_from_mnemonic(mnemonic)
+ display_mnemonic_msg( keypair, "coldkey" )
+ self.set_coldkey( keypair, encrypt = use_password, overwrite = overwrite)
+ self.set_coldkeypub( keypair, overwrite = overwrite)
+ return self
+
+ def new_hotkey( self, n_words:int = 12, use_password: bool = False, overwrite:bool = False) -> 'Wallet':
+ """ Creates a new hotkey, optionally encrypts it with the user's inputed password and saves to disk.
+ Args:
+ n_words: (int, optional):
+ Number of mnemonic words to use.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the hotkey under the same path <wallet path>/hotkeys/<hotkey>
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created hotkey.
+ """
+ return self.create_new_hotkey( n_words, use_password, overwrite )
+
+ def create_new_hotkey( self, n_words:int = 12, use_password: bool = False, overwrite:bool = False) -> 'Wallet':
+ """ Creates a new hotkey, optionally encrypts it with the user's inputed password and saves to disk.
+ Args:
+ n_words: (int, optional):
+ Number of mnemonic words to use.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the hotkey under the same path <wallet path>/hotkeys/<hotkey>
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created hotkey.
+ """
+ mnemonic = Keypair.generate_mnemonic( n_words)
+ keypair = Keypair.create_from_mnemonic(mnemonic)
+ display_mnemonic_msg( keypair, "hotkey" )
+ self.set_hotkey( keypair, encrypt=use_password, overwrite = overwrite)
+ return self
+
+ def regen_coldkey( self, mnemonic: Optional[Union[list, str]]=None, seed: Optional[str]=None, use_password: bool = True, overwrite:bool = False) -> 'Wallet':
+ """ Regenerates the coldkey from passed mnemonic, encrypts it with the user's password and save the file
+ Args:
+ mnemonic: (Union[list, str], optional):
+ Key mnemonic as list of words or string space separated words.
+ seed: (str, optional):
+ Seed as hex string.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the coldkey under the same path <wallet path>/coldkey
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created coldkey.
+ """
+ return self.regenerate_coldkey(mnemonic, seed, use_password, overwrite)
+
+ def regenerate_coldkeypub( self, ss58_address: Optional[str] = None, public_key: Optional[Union[str, bytes]] = None, overwrite: bool = False ) -> 'Wallet':
+ """ Regenerates the coldkeypub from passed ss58_address or public_key and saves the file
+ Requires either ss58_address or public_key to be passed.
+ Args:
+ ss58_address: (str, optional):
+ Address as ss58 string.
+ public_key: (str | bytes, optional):
+ Public key as hex string or bytes.
+ overwrite (bool, optional) (default: False):
+ Will this operation overwrite the coldkeypub (if it exists) under the same path <wallet path>/coldkeypub.txt
+ Returns:
+ wallet (bittensor.Wallet):
+ newly re-generated Wallet with coldkeypub.
+
+ """
+ if ss58_address is None and public_key is None:
+ raise ValueError("Either ss58_address or public_key must be passed")
+
+ if not is_valid_bittensor_address_or_public_key( ss58_address if ss58_address is not None else public_key ):
+ raise ValueError(f"Invalid {'ss58_address' if ss58_address is not None else 'public_key'}")
+
+ keypair = Keypair(ss58_address=ss58_address, public_key=public_key, ss58_format=bittensor.__ss58_format__)
+
+ # No need to encrypt the public key
+ self.set_coldkeypub( keypair, overwrite = overwrite)
+
+ return self
+
+ # Short name for regenerate_coldkeypub
+ regen_coldkeypub = regenerate_coldkeypub
+
+ def regenerate_coldkey( self, mnemonic: Optional[Union[list, str]] = None, seed: Optional[str] = None, use_password: bool = True, overwrite:bool = False) -> 'Wallet':
+ """ Regenerates the coldkey from passed mnemonic, encrypts it with the user's password and save the file
+ Args:
+ mnemonic: (Union[list, str], optional):
+ Key mnemonic as list of words or string space separated words.
+ seed: (str, optional):
+ Seed as hex string.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the coldkey under the same path <wallet path>/coldkey
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created coldkey.
+ """
+ if mnemonic is None and seed is None:
+ raise ValueError("Must pass either mnemonic or seed")
+ if mnemonic is not None:
+ if isinstance( mnemonic, str): mnemonic = mnemonic.split()
+ if len(mnemonic) not in [12,15,18,21,24]:
+ raise ValueError("Mnemonic has invalid size. This should be 12,15,18,21 or 24 words")
+ keypair = Keypair.create_from_mnemonic(" ".join(mnemonic))
+ display_mnemonic_msg( keypair, "coldkey" )
+ else:
+ # seed is not None
+ keypair = Keypair.create_from_seed(seed)
+
+ self.set_coldkey( keypair, encrypt = use_password, overwrite = overwrite)
+ self.set_coldkeypub( keypair, overwrite = overwrite)
+ return self
+
+ def regen_hotkey( self, mnemonic: Optional[Union[list, str]] = None, seed: Optional[str] = None, use_password: bool = True, overwrite:bool = False) -> 'Wallet':
+ """ Regenerates the hotkey from a passed mnemonic or seed, optionally encrypts it with the user's password and saves the file.
+ Args:
+ mnemonic: (Union[list, str], optional):
+ Key mnemonic as list of words or string space separated words.
+ seed: (str, optional):
+ Seed as hex string.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the hotkey under the same path <wallet path>/hotkeys/<hotkey>
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created hotkey.
+ """
+ return self.regenerate_hotkey(mnemonic, seed, use_password, overwrite)
+
+ def regenerate_hotkey( self, mnemonic: Optional[Union[list, str]] = None, seed: Optional[str] = None, use_password: bool = True, overwrite:bool = False) -> 'Wallet':
+ """ Regenerates the hotkey from passed mnemonic, encrypts it with the user's password and save the file
+ Args:
+ mnemonic: (Union[list, str], optional):
+ Key mnemonic as list of words or string space separated words.
+ seed: (str, optional):
+ Seed as hex string.
+ use_password (bool, optional):
+ Is the created key password protected.
+ overwrite (bool, optional):
+ Will this operation overwrite the hotkey under the same path <wallet path>/hotkeys/<hotkey>
+ Returns:
+ wallet (bittensor.Wallet):
+ this object with newly created hotkey.
+ """
+ if mnemonic is None and seed is None:
+ raise ValueError("Must pass either mnemonic or seed")
+ if mnemonic is not None:
+ if isinstance( mnemonic, str): mnemonic = mnemonic.split()
+ if len(mnemonic) not in [12,15,18,21,24]:
+ raise ValueError("Mnemonic has invalid size. This should be 12,15,18,21 or 24 words")
+ keypair = Keypair.create_from_mnemonic(" ".join(mnemonic))
+ display_mnemonic_msg( keypair, "hotkey" )
+ else:
+ # seed is not None
+ keypair = Keypair.create_from_seed(seed)
+
+ self.set_hotkey( keypair, encrypt=use_password, overwrite = overwrite)
+ return self
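+
+ # Illustrative usage (not part of the original change): regenerating keys from existing material.
+ # The mnemonic, address, and seed below are placeholders.
+ #
+ #   wallet.regenerate_coldkey( mnemonic = 'twelve space separated mnemonic words ...', use_password = True )
+ #   wallet.regenerate_coldkeypub( ss58_address = '5F...' )
+ #   wallet.regenerate_hotkey( seed = '0x' + '00' * 32, use_password = False )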
diff --git a/bittensor/_wallet/wallet_impl.py b/bittensor/_wallet/wallet_impl.py
index 276eca74f0..904c726572 100644
--- a/bittensor/_wallet/wallet_impl.py
+++ b/bittensor/_wallet/wallet_impl.py
@@ -176,9 +176,16 @@ def is_registered( self, subtensor: Optional['bittensor.Subtensor'] = None, netu
is_registered (bool):
Is the wallet registered on the chain.
"""
- if subtensor == None: subtensor = bittensor.subtensor()
- if netuid == None: return subtensor.is_hotkey_registered_any( self.hotkey.ss58_address )
- return subtensor.is_hotkey_registered_on_subnet( self.hotkey.ss58_address, netuid = netuid )
+ if subtensor == None: subtensor = bittensor.subtensor(self.config)
+ if self.config.subtensor.network == 'finney':
+ if netuid == None:
+ return subtensor.is_hotkey_registered_any( self.hotkey.ss58_address )
+ else:
+ return subtensor.is_hotkey_registered_on_subnet( self.hotkey.ss58_address, netuid = netuid )
+ else:
+ neuron = subtensor.neuron_for_pubkey( ss58_hotkey = self.hotkey.ss58_address )
+ return not neuron.is_null
+
def get_neuron ( self, netuid: int, subtensor: Optional['bittensor.Subtensor'] = None ) -> Optional['bittensor.NeuronInfo'] :
""" Returns this wallet's neuron information from subtensor.
diff --git a/bittensor/utils/registratrion_old.py b/bittensor/utils/registratrion_old.py
new file mode 100644
index 0000000000..ecf42e45a7
--- /dev/null
+++ b/bittensor/utils/registratrion_old.py
@@ -0,0 +1,835 @@
+import binascii
+import hashlib
+import math
+import multiprocessing
+import os
+import random
+import time
+from dataclasses import dataclass
+from datetime import timedelta
+from queue import Empty, Full
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import backoff
+import bittensor
+import torch
+from Crypto.Hash import keccak
+from rich import console as rich_console
+from rich import status as rich_status
+
+from .register_cuda import solve_cuda
+
+
+class CUDAException(Exception):
+ """An exception raised when an error occurs in the CUDA environment."""
+ pass
+
+
+def hex_bytes_to_u8_list( hex_bytes: bytes ):
+ hex_chunks = [int(hex_bytes[i:i+2], 16) for i in range(0, len(hex_bytes), 2)]
+ return hex_chunks
+
+
+def u8_list_to_hex( values: list ):
+ total = 0
+ for val in reversed(values):
+ total = (total << 8) + val
+ return total
+
+
+def create_seal_hash( block_hash: str, nonce:int ) -> bytes:
+ block_bytes = block_hash.encode('utf-8')[2:]
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, 'little'))
+ pre_seal = nonce_bytes + block_bytes
+ seal_sh256 = hashlib.sha256( bytearray(hex_bytes_to_u8_list(pre_seal)) ).digest()
+ kec = keccak.new(digest_bits=256)
+ seal = kec.update( seal_sh256 ).digest()
+ return seal
+
+
+def seal_meets_difficulty( seal:bytes, difficulty:int ):
+ seal_number = int.from_bytes(seal, "big")
+ product = seal_number * difficulty
+ limit = int(math.pow(2,256))- 1
+ if product > limit:
+ return False
+ else:
+ return True
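+
+# Sketch (not part of the original change): the check above is equivalent to requiring that the
+# 256-bit seal, read as a big-endian integer, be at most (2**256 - 1) // difficulty.
+#
+#   assert seal_meets_difficulty(b'\x00' * 32, 1_000_000)   # zero seal always passes
+#   assert not seal_meets_difficulty(b'\xff' * 32, 2)       # maximal seal fails for difficulty > 1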
+
+
+def solve_for_difficulty( block_hash, difficulty ):
+ meets = False
+ nonce = -1
+ while not meets:
+ nonce += 1
+ seal = create_seal_hash( block_hash, nonce )
+ meets = seal_meets_difficulty( seal, difficulty )
+ if nonce > 1:
+ break
+ return nonce, seal
+
+
+def get_human_readable(num, suffix="H"):
+ for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
+ if abs(num) < 1000.0:
+ return f"{num:3.1f}{unit}{suffix}"
+ num /= 1000.0
+ return f"{num:.1f}Y{suffix}"
+
+
+def millify(n: int):
+ millnames = ['',' K',' M',' B',' T']
+ n = float(n)
+ millidx = max(0,min(len(millnames)-1,
+ int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))
+
+ return '{:.2f}{}'.format(n / 10**(3 * millidx), millnames[millidx])
+
+
+def POWNotStale(subtensor: 'bittensor.Subtensor', pow_result: Dict) -> bool:
+ """Returns True if the POW is not stale.
+ This means the block the POW is solved for is within 3 blocks of the current block.
+ """
+ return pow_result['block_number'] >= subtensor.get_current_block() - 3
+
+
+@dataclass
+class POWSolution:
+ """A solution to the registration PoW problem."""
+ nonce: int
+ block_number: int
+ difficulty: int
+ seal: bytes
+
+
+class SolverBase(multiprocessing.Process):
+ """
+ A process that solves the registration PoW problem.
+ Args:
+ proc_num: int
+ The number of the process being created.
+ num_proc: int
+ The total number of processes running.
+ update_interval: int
+ The number of nonces to try to solve before checking for a new block.
+ finished_queue: multiprocessing.Queue
+ The queue to put the process number when a process finishes each update_interval.
+ Used for calculating the average time per update_interval across all processes.
+ solution_queue: multiprocessing.Queue
+ The queue to put the solution the process has found during the pow solve.
+ newBlockEvent: multiprocessing.Event
+ The event to set by the main process when a new block is finalized in the network.
+ The solver process will check for the event after each update_interval.
+ The solver process will get the new block hash and difficulty and start solving for a new nonce.
+ stopEvent: multiprocessing.Event
+ The event to set by the main process when all the solver processes should stop.
+ The solver process will check for the event after each update_interval.
+ The solver process will stop when the event is set.
+ Used to stop the solver processes when a solution is found.
+ curr_block: multiprocessing.Array
+ The array containing this process's current block hash.
+ The main process will set the array to the new block hash when a new block is finalized in the network.
+ The solver process will get the new block hash from this array when newBlockEvent is set.
+ curr_block_num: multiprocessing.Value
+ The value containing this process's current block number.
+ The main process will set the value to the new block number when a new block is finalized in the network.
+ The solver process will get the new block number from this value when newBlockEvent is set.
+ curr_diff: multiprocessing.Array
+ The array containing this process's current difficulty.
+ The main process will set the array to the new difficulty when a new block is finalized in the network.
+ The solver process will get the new difficulty from this array when newBlockEvent is set.
+ check_block: multiprocessing.Lock
+ The lock to prevent this process from getting the new block data while the main process is updating the data.
+ limit: int
+ The limit of the pow solve for a valid solution.
+ """
+ proc_num: int
+ num_proc: int
+ update_interval: int
+ finished_queue: multiprocessing.Queue
+ solution_queue: multiprocessing.Queue
+ newBlockEvent: multiprocessing.Event
+ stopEvent: multiprocessing.Event
+ curr_block: multiprocessing.Array
+ curr_block_num: multiprocessing.Value
+ curr_diff: multiprocessing.Array
+ check_block: multiprocessing.Lock
+ limit: int
+
+ def __init__(self, proc_num, num_proc, update_interval, finished_queue, solution_queue, stopEvent, curr_block, curr_block_num, curr_diff, check_block, limit):
+ multiprocessing.Process.__init__(self, daemon=True)
+ self.proc_num = proc_num
+ self.num_proc = num_proc
+ self.update_interval = update_interval
+ self.finished_queue = finished_queue
+ self.solution_queue = solution_queue
+ self.newBlockEvent = multiprocessing.Event()
+ self.newBlockEvent.clear()
+ self.curr_block = curr_block
+ self.curr_block_num = curr_block_num
+ self.curr_diff = curr_diff
+ self.check_block = check_block
+ self.stopEvent = stopEvent
+ self.limit = limit
+
+ def run(self):
+ raise NotImplementedError("SolverBase is an abstract class")
+
+
+class Solver(SolverBase):
+ def run(self):
+ block_number: int
+ block_bytes: bytes
+ block_difficulty: int
+ nonce_limit = int(math.pow(2,64)) - 1
+
+ # Start at random nonce
+ nonce_start = random.randint( 0, nonce_limit )
+ nonce_end = nonce_start + self.update_interval
+ while not self.stopEvent.is_set():
+ if self.newBlockEvent.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_bytes = bytes(self.curr_block)
+ block_difficulty = registration_diff_unpack(self.curr_diff)
+
+ self.newBlockEvent.clear()
+
+ # Do a block of nonces
+ solution = solve_for_nonce_block(self, nonce_start, nonce_end, block_bytes, block_difficulty, self.limit, block_number)
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Send time
+ self.finished_queue.put_nowait(self.proc_num)
+ except Full:
+ pass
+
+ nonce_start = random.randint( 0, nonce_limit )
+ nonce_start = nonce_start % nonce_limit
+ nonce_end = nonce_start + self.update_interval
+
+
+class CUDASolver(SolverBase):
+ dev_id: int
+ TPB: int
+
+ def __init__(self, proc_num, num_proc, update_interval, finished_queue, solution_queue, stopEvent, curr_block, curr_block_num, curr_diff, check_block, limit, dev_id: int, TPB: int):
+ super().__init__(proc_num, num_proc, update_interval, finished_queue, solution_queue, stopEvent, curr_block, curr_block_num, curr_diff, check_block, limit)
+ self.dev_id = dev_id
+ self.TPB = TPB
+
+ def run(self):
+ block_number: int = 0 # dummy value
+ block_bytes: bytes = b'0' * 32 # dummy value
+ block_difficulty: int = int(math.pow(2,64)) - 1 # dummy value
+ nonce_limit = int(math.pow(2,64)) - 1 # U64MAX
+
+ # Start at random nonce
+ nonce_start = random.randint( 0, nonce_limit )
+ while not self.stopEvent.is_set():
+ if self.newBlockEvent.is_set():
+ with self.check_block:
+ block_number = self.curr_block_num.value
+ block_bytes = bytes(self.curr_block)
+ block_difficulty = registration_diff_unpack(self.curr_diff)
+
+ self.newBlockEvent.clear()
+
+ # Do a block of nonces
+ solution = solve_for_nonce_block_cuda(self, nonce_start, self.update_interval, block_bytes, block_difficulty, self.limit, block_number, self.dev_id, self.TPB)
+ if solution is not None:
+ self.solution_queue.put(solution)
+
+ try:
+ # Signal that a nonce_block was finished using queue
+ # send our proc_num
+ self.finished_queue.put(self.proc_num)
+ except Full:
+ pass
+
+ # increase nonce by number of nonces processed
+ nonce_start += self.update_interval * self.TPB
+ nonce_start = nonce_start % nonce_limit
+
+
+def solve_for_nonce_block_cuda(solver: CUDASolver, nonce_start: int, update_interval: int, block_bytes: bytes, difficulty: int, limit: int, block_number: int, dev_id: int, TPB: int) -> Optional[POWSolution]:
+ """Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * TPB"""
+ solution, seal = solve_cuda(nonce_start,
+ update_interval,
+ TPB,
+ block_bytes,
+ block_number,
+ difficulty,
+ limit,
+ dev_id)
+
+ if (solution != -1):
+ # Check if solution is valid (i.e. not -1)
+ return POWSolution(solution, block_number, difficulty, seal)
+
+ return None
+
+
+def solve_for_nonce_block(solver: Solver, nonce_start: int, nonce_end: int, block_bytes: bytes, difficulty: int, limit: int, block_number: int) -> Optional[POWSolution]:
+ """Tries to solve the POW for a block of nonces (nonce_start, nonce_end)"""
+ for nonce in range(nonce_start, nonce_end):
+ # Create seal.
+ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, 'little'))
+ pre_seal = nonce_bytes + block_bytes
+ seal_sh256 = hashlib.sha256( bytearray(hex_bytes_to_u8_list(pre_seal)) ).digest()
+ kec = keccak.new(digest_bits=256)
+ seal = kec.update( seal_sh256 ).digest()
+ seal_number = int.from_bytes(seal, "big")
+
+ # Check if seal meets difficulty
+ product = seal_number * difficulty
+ if product < limit:
+ # Found a solution, save it.
+ return POWSolution(nonce, block_number, difficulty, seal)
+
+ return None
+
+
+def registration_diff_unpack(packed_diff: multiprocessing.Array) -> int:
+ """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian."""
+ return int(packed_diff[0] << 32 | packed_diff[1])
+
+
+def registration_diff_pack(diff: int, packed_diff: multiprocessing.Array):
+ """Packs the difficulty into two 32-bit integers. Little endian."""
+ packed_diff[0] = diff >> 32
+ packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits
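+
+# Sketch (not part of the original change): pack/unpack round-trip for the shared difficulty array.
+#
+#   diff_array = multiprocessing.Array('Q', [0, 0], lock=True)
+#   registration_diff_pack(1_000_000_000_000, diff_array)
+#   assert registration_diff_unpack(diff_array) == 1_000_000_000_000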
+
+
+def update_curr_block(curr_diff: multiprocessing.Array, curr_block: multiprocessing.Array, curr_block_num: multiprocessing.Value, block_number: int, block_bytes: bytes, diff: int, lock: multiprocessing.Lock):
+ with lock:
+ curr_block_num.value = block_number
+ for i in range(64):
+ curr_block[i] = block_bytes[i]
+ registration_diff_pack(diff, curr_diff)
+
+
+def get_cpu_count():
+ try:
+ return len(os.sched_getaffinity(0))
+ except AttributeError:
+ # OSX does not have sched_getaffinity
+ return os.cpu_count()
+
+@dataclass
+class RegistrationStatistics:
+ """Statistics for a registration."""
+ time_spent_total: float
+ rounds_total: int
+ time_average: float
+ time_spent: float
+ hash_rate_perpetual: float
+ hash_rate: float
+ difficulty: int
+ block_number: int
+ block_hash: bytes
+
+
+class RegistrationStatisticsLogger:
+ """Logs statistics for a registration."""
+ console: rich_console.Console
+ status: Optional[rich_status.Status]
+
+ def __init__( self, console: rich_console.Console, output_in_place: bool = True) -> None:
+ self.console = console
+
+ if output_in_place:
+ self.status = self.console.status("Solving")
+ else:
+ self.status = None
+
+ def start( self ) -> None:
+ if self.status is not None:
+ self.status.start()
+
+ def stop( self ) -> None:
+ if self.status is not None:
+ self.status.stop()
+
+
+ @classmethod
+ def get_status_message(cls, stats: RegistrationStatistics, verbose: bool = False) -> str:
+ message = \
+ "Solving\n" + \
+ f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n" + \
+ (
+ f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n" + \
+ f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n" if verbose else ""
+ ) + \
+ f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n" + \
+ f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / " + \
+ f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n" + \
+ f"Block Number: [bold white]{stats.block_number}[/bold white]\n" + \
+ f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n"
+ return message
+
+
+ def update( self, stats: RegistrationStatistics, verbose: bool = False ) -> None:
+ if self.status is not None:
+ self.status.update( self.get_status_message(stats, verbose=verbose) )
+ else:
+ self.console.log( self.get_status_message(stats, verbose=verbose), )
+
+
+def solve_for_difficulty_fast( subtensor, wallet, output_in_place: bool = True, num_processes: Optional[int] = None, update_interval: Optional[int] = None, n_samples: int = 10, alpha_: float = 0.80, log_verbose: bool = False ) -> Optional[POWSolution]:
+ """
+ Solves the POW for registration using multiprocessing.
+ Args:
+ subtensor
+ Subtensor to connect to for block information and to submit.
+ wallet:
+ Wallet to use for registration.
+ output_in_place: bool
+ If true, prints the status in place. Otherwise, prints the status on a new line.
+ num_processes: int
+ Number of processes to use.
+ update_interval: int
+ Number of nonces to solve before updating block information.
+ n_samples: int
+ The number of samples of the hash_rate to keep for the EWMA
+ alpha_: float
+ The alpha for the EWMA for the hash_rate calculation
+ log_verbose: bool
+ If true, prints more verbose logging of the registration metrics.
+ Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ Note:
+ - We can also modify the update interval to do smaller blocks of work,
+ while still updating the block information after a different number of nonces,
+ to increase the transparency of the process while still keeping the speed.
+ """
+ if num_processes == None:
+ # get the number of allowed processes for this process
+ num_processes = min(1, get_cpu_count())
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ limit = int(math.pow(2,256)) - 1
+
+ curr_block = multiprocessing.Array('h', 64, lock=True) # byte array
+ curr_block_num = multiprocessing.Value('i', 0, lock=True) # int
+ curr_diff = multiprocessing.Array('Q', [0, 0], lock=True) # [high, low]
+
+ # Establish communication queues
+ ## See the Solver class for more information on the queues.
+ stopEvent = multiprocessing.Event()
+ stopEvent.clear()
+
+ solution_queue = multiprocessing.Queue()
+ finished_queues = [multiprocessing.Queue() for _ in range(num_processes)]
+ check_block = multiprocessing.Lock()
+
+ # Start consumers
+ solvers = [ Solver(i, num_processes, update_interval, finished_queues[i], solution_queue, stopEvent, curr_block, curr_block_num, curr_diff, check_block, limit)
+ for i in range(num_processes) ]
+
+ # Get first block
+ block_number = subtensor.get_current_block()
+ difficulty = subtensor.difficulty
+ block_hash = subtensor.substrate.get_block_hash( block_number )
+ while block_hash == None:
+ block_hash = subtensor.substrate.get_block_hash( block_number )
+ block_bytes = block_hash.encode('utf-8')[2:]
+ old_block_number = block_number
+ # Set to current block
+ update_curr_block(curr_diff, curr_block, curr_block_num, block_number, block_bytes, difficulty, check_block)
+
+ # Set new block events for each solver to start at the initial block
+ for worker in solvers:
+ worker.newBlockEvent.set()
+
+ for worker in solvers:
+ worker.start() # start the solver processes
+
+ start_time = time.time() # time that the registration started
+ time_last = start_time # time that the last work blocks completed
+
+ curr_stats = RegistrationStatistics(
+ time_spent_total = 0.0,
+ time_average = 0.0,
+ rounds_total = 0,
+ time_spent = 0.0,
+ hash_rate_perpetual = 0.0,
+ hash_rate = 0.0,
+ difficulty = difficulty,
+ block_number = block_number,
+ block_hash = block_hash
+ )
+
+ start_time_perpetual = time.time()
+
+
+ console = bittensor.__console__
+ logger = RegistrationStatisticsLogger(console, output_in_place)
+ logger.start()
+
+ solution = None
+
+ hash_rates = [0] * n_samples # The last n true hash_rates
+ weights = [alpha_ ** i for i in range(n_samples)] # weights decay by alpha
+
+ while not wallet.is_registered(subtensor):
+ # Wait until a solver finds a solution
+ try:
+ solution = solution_queue.get(block=True, timeout=0.25)
+ if solution is not None:
+ break
+ except Empty:
+ # No solution found, try again
+ pass
+
+ # check for new block
+ old_block_number = check_for_newest_block_and_update(
+ subtensor = subtensor,
+ old_block_number=old_block_number,
+ curr_diff=curr_diff,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ curr_stats=curr_stats,
+ update_curr_block=update_curr_block,
+ check_block=check_block,
+ solvers=solvers
+ )
+
+ num_time = 0
+ for finished_queue in finished_queues:
+ try:
+ proc_num = finished_queue.get(timeout=0.1)
+ num_time += 1
+
+ except Empty:
+ continue
+
+ time_now = time.time() # get current time
+ time_since_last = time_now - time_last # get time since last work block(s)
+ if num_time > 0 and time_since_last > 0.0:
+ # create EWMA of the hash_rate to make measure more robust
+
+ hash_rate_ = (num_time * update_interval) / time_since_last
+ hash_rates.append(hash_rate_)
+ hash_rates.pop(0) # remove the 0th data point
+ curr_stats.hash_rate = sum([hash_rates[i]*weights[i] for i in range(n_samples)])/(sum(weights))
+
+ # update time last to now
+ time_last = time_now
+
+ curr_stats.time_average = (curr_stats.time_average*curr_stats.rounds_total + curr_stats.time_spent)/(curr_stats.rounds_total+num_time)
+ curr_stats.rounds_total += num_time
+
+ # Update stats
+ curr_stats.time_spent = time_since_last
+ new_time_spent_total = time_now - start_time_perpetual
+ curr_stats.hash_rate_perpetual = (curr_stats.rounds_total*update_interval)/ new_time_spent_total
+ curr_stats.time_spent_total = new_time_spent_total
+
+ # Update the logger
+ logger.update(curr_stats, verbose=log_verbose)
+
+ # exited while, solution contains the nonce or wallet is registered
+ stopEvent.set() # stop all other processes
+ logger.stop()
+
+ # terminate and wait for all solvers to exit
+ terminate_workers_and_wait_for_exit(solvers)
+
+ return solution
+
+
+@backoff.on_exception(backoff.constant,
+ Exception,
+ interval=1,
+ max_tries=3)
+def get_block_with_retry(subtensor: 'bittensor.Subtensor') -> Tuple[int, int, bytes]:
+ block_number = subtensor.get_current_block()
+ difficulty = subtensor.difficulty
+ block_hash = subtensor.substrate.get_block_hash( block_number )
+ if block_hash is None:
+ raise Exception("Network error. Could not connect to substrate to get block hash")
+ return block_number, difficulty, block_hash
+
+
+class UsingSpawnStartMethod():
+ def __init__(self, force: bool = False):
+ self._old_start_method = None
+ self._force = force
+
+ def __enter__(self):
+ self._old_start_method = multiprocessing.get_start_method(allow_none=True)
+ if self._old_start_method == None:
+ self._old_start_method = 'spawn' # default to spawn
+
+ multiprocessing.set_start_method('spawn', force=self._force)
+
+ def __exit__(self, *args):
+ # restore the old start method
+ multiprocessing.set_start_method(self._old_start_method, force=True)
+
+
+def check_for_newest_block_and_update(
+ subtensor: 'bittensor.Subtensor',
+ old_block_number: int,
+ curr_diff: multiprocessing.Array,
+ curr_block: multiprocessing.Array,
+ curr_block_num: multiprocessing.Value,
+ update_curr_block: Callable,
+ check_block: 'multiprocessing.Lock',
+ solvers: List[Solver],
+ curr_stats: RegistrationStatistics
+ ) -> int:
+ """
+ Checks for a new block and updates the current block information if a new block is found.
+ Args:
+ subtensor (:obj:`bittensor.Subtensor`, `required`):
+ The subtensor object to use for getting the current block.
+ old_block_number (:obj:`int`, `required`):
+ The old block number to check against.
+ curr_diff (:obj:`multiprocessing.Array`, `required`):
+ The current difficulty as a multiprocessing array.
+ curr_block (:obj:`multiprocessing.Array`, `required`):
+ Where the current block is stored as a multiprocessing array.
+ curr_block_num (:obj:`multiprocessing.Value`, `required`):
+ Where the current block number is stored as a multiprocessing value.
+ update_curr_block (:obj:`Callable`, `required`):
+ A function that updates the current block.
+ check_block (:obj:`multiprocessing.Lock`, `required`):
+ A mp lock that is used to check for a new block.
+ solvers (:obj:`List[Solver]`, `required`):
+ A list of solvers to update the current block for.
+ curr_stats (:obj:`RegistrationStatistics`, `required`):
+ The current registration statistics to update.
+ Returns:
+ (int) The current block number.
+ """
+ block_number = subtensor.get_current_block()
+ if block_number != old_block_number:
+ old_block_number = block_number
+ # update block information
+ block_hash = subtensor.substrate.get_block_hash( block_number)
+ while block_hash == None:
+ block_hash = subtensor.substrate.get_block_hash( block_number)
+ block_bytes = block_hash.encode('utf-8')[2:]
+ difficulty = subtensor.difficulty
+
+ update_curr_block(curr_diff, curr_block, curr_block_num, block_number, block_bytes, difficulty, check_block)
+ # Set new block events for each solver
+
+ for worker in solvers:
+ worker.newBlockEvent.set()
+
+ # update stats
+ curr_stats.block_number = block_number
+ curr_stats.block_hash = block_hash
+ curr_stats.difficulty = difficulty
+
+ return old_block_number
+
+
+def solve_for_difficulty_fast_cuda( subtensor: 'bittensor.Subtensor', wallet: 'bittensor.Wallet', output_in_place: bool = True, update_interval: int = 50_000, TPB: int = 512, dev_id: Union[List[int], int] = 0, n_samples: int = 10, alpha_: float = 0.80, log_verbose: bool = False ) -> Optional[POWSolution]:
+ """
+ Solves the registration fast using CUDA
+ Args:
+ subtensor: bittensor.Subtensor
+ The subtensor node to grab blocks
+ wallet: bittensor.Wallet
+ The wallet to register
+ output_in_place: bool
+ If true, prints the output in place, otherwise prints to new lines
+ update_interval: int
+ The number of nonces to try before checking for more blocks
+ TPB: int
+ The number of threads per block. CUDA param that should match the GPU capability
+ dev_id: Union[List[int], int]
+ The CUDA device IDs to execute the registration on, either a single device or a list of devices
+ n_samples: int
+ The number of samples of the hash_rate to keep for the EWMA
+ alpha_: float
+ The alpha for the EWMA for the hash_rate calculation
+ log_verbose: bool
+ If true, prints more verbose logging of the registration metrics.
+ Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
+ """
+ if isinstance(dev_id, int):
+ dev_id = [dev_id]
+ elif dev_id is None:
+ dev_id = [0]
+
+ if update_interval is None:
+ update_interval = 50_000
+
+ if not torch.cuda.is_available():
+ raise Exception("CUDA not available")
+
+ limit = int(math.pow(2,256)) - 1
+
+ # Set mp start to use spawn so CUDA doesn't complain
+ with UsingSpawnStartMethod(force=True):
+ curr_block = multiprocessing.Array('h', 64, lock=True) # byte array
+ curr_block_num = multiprocessing.Value('i', 0, lock=True) # int
+ curr_diff = multiprocessing.Array('Q', [0, 0], lock=True) # [high, low]
+
+ ## Create a worker per CUDA device
+ num_processes = len(dev_id)
+
+ # Establish communication queues
+ stopEvent = multiprocessing.Event()
+ stopEvent.clear()
+ solution_queue = multiprocessing.Queue()
+ finished_queues = [multiprocessing.Queue() for _ in range(num_processes)]
+ check_block = multiprocessing.Lock()
+
+ # Start workers
+ solvers = [ CUDASolver(i, num_processes, update_interval, finished_queues[i], solution_queue, stopEvent, curr_block, curr_block_num, curr_diff, check_block, limit, dev_id[i], TPB)
+ for i in range(num_processes) ]
+
+
+ # Get first block
+ block_number = subtensor.get_current_block()
+ difficulty = subtensor.difficulty
+ block_hash = subtensor.substrate.get_block_hash( block_number )
+ while block_hash == None:
+ block_hash = subtensor.substrate.get_block_hash( block_number )
+ block_bytes = block_hash.encode('utf-8')[2:]
+ old_block_number = block_number
+
+ # Set to current block
+ update_curr_block(curr_diff, curr_block, curr_block_num, block_number, block_bytes, difficulty, check_block)
+
+ # Set new block events for each solver to start at the initial block
+ for worker in solvers:
+ worker.newBlockEvent.set()
+
+ for worker in solvers:
+ worker.start() # start the solver processes
+
+ start_time = time.time() # time that the registration started
+ time_last = start_time # time that the last work blocks completed
+
+ curr_stats = RegistrationStatistics(
+ time_spent_total = 0.0,
+ time_average = 0.0,
+ rounds_total = 0,
+ time_spent = 0.0,
+ hash_rate_perpetual = 0.0,
+ hash_rate = 0.0, # EWMA hash_rate (H/s)
+ difficulty = difficulty,
+ block_number = block_number,
+ block_hash = block_hash
+ )
+
+ start_time_perpetual = time.time()
+
+ console = bittensor.__console__
+ logger = RegistrationStatisticsLogger(console, output_in_place)
+ logger.start()
+
+ hash_rates = [0] * n_samples # The last n true hash_rates
+ weights = [alpha_ ** i for i in range(n_samples)] # weights decay by alpha
+
+ solution = None
+ while not wallet.is_registered(subtensor):
+ # Wait until a solver finds a solution
+ try:
+ solution = solution_queue.get(block=True, timeout=0.15)
+ if solution is not None:
+ break
+ except Empty:
+ # No solution found, try again
+ pass
+
+ # check for new block
+ old_block_number = check_for_newest_block_and_update(
+ subtensor = subtensor,
+ curr_diff=curr_diff,
+ curr_block=curr_block,
+ curr_block_num=curr_block_num,
+ old_block_number=old_block_number,
+ curr_stats=curr_stats,
+ update_curr_block=update_curr_block,
+ check_block=check_block,
+ solvers=solvers
+ )
+
+ num_time = 0
+ # Get times for each solver
+ for finished_queue in finished_queues:
+ try:
+ proc_num = finished_queue.get(timeout=0.1)
+ num_time += 1
+
+ except Empty:
+ continue
+
+ time_now = time.time() # get current time
+ time_since_last = time_now - time_last # get time since last work block(s)
+ if num_time > 0 and time_since_last > 0.0:
+ # create EWMA of the hash_rate to make measure more robust
+
+ hash_rate_ = (num_time * TPB * update_interval) / time_since_last
+ hash_rates.append(hash_rate_)
+ hash_rates.pop(0) # remove the 0th data point
+ curr_stats.hash_rate = sum([hash_rates[i]*weights[i] for i in range(n_samples)])/(sum(weights))
+
+ # update time last to now
+ time_last = time_now
+
+ curr_stats.time_average = (curr_stats.time_average*curr_stats.rounds_total + curr_stats.time_spent)/(curr_stats.rounds_total+num_time)
+ curr_stats.rounds_total += num_time
+
+ # Update stats
+ curr_stats.time_spent = time_since_last
+ new_time_spent_total = time_now - start_time_perpetual
+ curr_stats.hash_rate_perpetual = (curr_stats.rounds_total * (TPB * update_interval))/ new_time_spent_total
+ curr_stats.time_spent_total = new_time_spent_total
+
+ # Update the logger
+ logger.update(curr_stats, verbose=log_verbose)
+
+ # exited while; solution contains the nonce or the wallet is registered
+
+ stopEvent.set() # stop all other processes
+ logger.stop()
+
+ # terminate and wait for all solvers to exit
+ terminate_workers_and_wait_for_exit(solvers)
+
+ return solution
+
+
+def terminate_workers_and_wait_for_exit(workers: List[multiprocessing.Process]) -> None:
+ for worker in workers:
+ worker.terminate()
+ worker.join()
+
+
+def create_pow(
+ subtensor,
+ wallet,
+ output_in_place: bool = True,
+ cuda: bool = False,
+ dev_id: Union[List[int], int] = 0,
+ tpb: int = 256,
+ num_processes: int = None,
+ update_interval: int = None,
+ log_verbose: bool = False
+ ) -> Optional[Dict[str, Any]]:
+ if cuda:
+ solution: POWSolution = solve_for_difficulty_fast_cuda( subtensor, wallet, output_in_place=output_in_place, \
+ dev_id=dev_id, TPB=tpb, update_interval=update_interval, log_verbose=log_verbose
+ )
+ else:
+ solution: POWSolution = solve_for_difficulty_fast( subtensor, wallet, output_in_place=output_in_place, \
+ num_processes=num_processes, update_interval=update_interval, log_verbose=log_verbose
+ )
+
+ return None if solution is None else {
+ 'nonce': solution.nonce,
+ 'difficulty': solution.difficulty,
+ 'block_number': solution.block_number,
+ 'work': binascii.hexlify(solution.seal)
+ }
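+
+# Illustrative usage (not part of the original change): producing a PoW payload for registration.
+# Assumes a reachable subtensor endpoint and an existing, unlocked wallet.
+#
+#   sub = bittensor.subtensor()
+#   wallet = bittensor.wallet()
+#   pow_result = create_pow( sub, wallet, cuda = False, num_processes = 2, update_interval = 50_000 )
+#   if pow_result is not None and POWNotStale( sub, pow_result ):
+#       print( pow_result['nonce'], pow_result['work'] )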