diff --git a/compute/__init__.py b/compute/__init__.py
index c329a228..87f159e0 100644
--- a/compute/__init__.py
+++ b/compute/__init__.py
@@ -18,7 +18,7 @@ import string
 
 # Define the version of the template module.
-__version__ = "1.4.5"
+__version__ = "1.4.6"
 __minimal_miner_version__ = "1.4.5"
 __minimal_validator_version__ = "1.4.5"
diff --git a/neurons/Miner/specs.py b/neurons/Miner/specs.py
index 49dacba1..600e8c95 100644
--- a/neurons/Miner/specs.py
+++ b/neurons/Miner/specs.py
@@ -26,66 +26,66 @@ import bittensor as bt
 
-class RequestSpecsProcessor:
-    def __init__(self):
-        self.request_queue = queue.Queue()
-        self.results_dict = {}
-        # Start the worker thread
-        threading.Thread(target=self.worker, daemon=True).start()
-
-    def worker(self):
-        while True:
-            # Get a request, its associated request_id, and event from the queue
-            app_data, request_id, done_event = self.request_queue.get()
-            try:
-                # Process the request
-                self.process_request(app_data, request_id)
-            finally:
-                # Mark the processed request as done
-                self.request_queue.task_done()
-                # Set the event to signal that the processing is complete
-                done_event.set()
-            time.sleep(1)
-
-    def process_request(self, app_data, request_id):
-        bt.logging.info(f"💻 Specs query started {request_id} ...")
-        try:
-            app_data = ast.literal_eval(app_data)
-
-            main_dir = os.path.dirname(os.path.abspath(__file__))
-            file_path = os.path.join(main_dir, f"app_{request_id}")  # Use a unique file name
-
-            # Write the bytes data to a file
-            with open(file_path, "wb") as file:
-                file.write(app_data)
-            subprocess.run(f"chmod +x {file_path}", shell=True, check=True)
-            result = subprocess.check_output([file_path], shell=True, text=True)
-        except Exception as e:
-            traceback.print_exc()
-            result = {"process_request error": str(e)}
-        finally:
-            # Clean up the file after execution
-            if os.path.exists(file_path):
-                os.remove(file_path)
-
-        # Store the result in the shared dictionary
-        self.results_dict[request_id] = result
-
-    def get_respond(self, app_data):
-        try:
-            # Generate a unique identifier for the request
-            request_id = str(uuid.uuid4())
-            # Create an event that will be set when the request is processed
-            done_event = threading.Event()
-            # Add the request, request_id, and the event to the queue
-            bt.logging.info(f"💻 Specs query queuing {request_id} ...")
-            self.request_queue.put((app_data, request_id, done_event))
-            # Wait for the request to be processed
-            done_event.wait()  # This will block until the event is set
-            # Retrieve the result from the results_dict
-            result = self.results_dict.pop(request_id)  # Remove the result from the dictionary
-            bt.logging.info(f"💻 Specs query finalized {request_id} ...")
-            return result
-        except Exception as e:
-            traceback.print_exc()
-            return {"get_respond error": str(e)}
+# class RequestSpecsProcessor:
+#     def __init__(self):
+#         self.request_queue = queue.Queue()
+#         self.results_dict = {}
+#         # Start the worker thread
+#         threading.Thread(target=self.worker, daemon=True).start()
+#
+#     def worker(self):
+#         while True:
+#             # Get a request, its associated request_id, and event from the queue
+#             app_data, request_id, done_event = self.request_queue.get()
+#             try:
+#                 # Process the request
+#                 self.process_request(app_data, request_id)
+#             finally:
+#                 # Mark the processed request as done
+#                 self.request_queue.task_done()
+#                 # Set the event to signal that the processing is complete
+#                 done_event.set()
+#             time.sleep(1)
+#
+#     def process_request(self, app_data, request_id):
+#         bt.logging.info(f"💻 Specs query started {request_id} ...")
+#         try:
+#             app_data = ast.literal_eval(app_data)
+#
+#             main_dir = os.path.dirname(os.path.abspath(__file__))
+#             file_path = os.path.join(main_dir, f"app_{request_id}")  # Use a unique file name
+#
+#             # Write the bytes data to a file
+#             with open(file_path, "wb") as file:
+#                 file.write(app_data)
+#             subprocess.run(f"chmod +x {file_path}", shell=True, check=True)
+#             result = subprocess.check_output([file_path], shell=True, text=True)
+#         except Exception as e:
+#             traceback.print_exc()
+#             result = {"process_request error": str(e)}
+#         finally:
+#             # Clean up the file after execution
+#             if os.path.exists(file_path):
+#                 os.remove(file_path)
+#
+#         # Store the result in the shared dictionary
+#         self.results_dict[request_id] = result
+#
+#     def get_respond(self, app_data):
+#         try:
+#             # Generate a unique identifier for the request
+#             request_id = str(uuid.uuid4())
+#             # Create an event that will be set when the request is processed
+#             done_event = threading.Event()
+#             # Add the request, request_id, and the event to the queue
+#             bt.logging.info(f"💻 Specs query queuing {request_id} ...")
+#             self.request_queue.put((app_data, request_id, done_event))
+#             # Wait for the request to be processed
+#             done_event.wait()  # This will block until the event is set
+#             # Retrieve the result from the results_dict
+#             result = self.results_dict.pop(request_id)  # Remove the result from the dictionary
+#             bt.logging.info(f"💻 Specs query finalized {request_id} ...")
+#             return result
+#         except Exception as e:
+#             traceback.print_exc()
+#             return {"get_respond error": str(e)}
diff --git a/neurons/miner.py b/neurons/miner.py
index 3a2a9b68..cf565f2c 100644
--- a/neurons/miner.py
+++ b/neurons/miner.py
@@ -60,7 +60,7 @@ from compute.wandb.wandb import ComputeWandb
 from neurons.Miner.allocate import check_allocation, register_allocation
 from neurons.Miner.pow import check_cuda_availability, run_miner_pow
-from neurons.Miner.specs import RequestSpecsProcessor
+# from neurons.Miner.specs import RequestSpecsProcessor
 from neurons.Validator.script import check_docker_availability
 
@@ -181,7 +181,8 @@ def __init__(self):
             self.wandb.update_allocated(None)
             bt.logging.info("Container is already running without allocated. Killing the container.")
 
-        self.request_specs_processor = RequestSpecsProcessor()
+        # Disabled the Specs request and replaced it with WandB
+        # self.request_specs_processor = RequestSpecsProcessor()
 
         self.last_updated_block = self.current_block - (self.current_block % 100)
 
@@ -198,10 +199,11 @@ def init_axon(self):
             forward_fn=self.challenge,
             blacklist_fn=self.blacklist_challenge,
             priority_fn=self.priority_challenge,
-        ).attach(
-            forward_fn=self.specs,
-            blacklist_fn=self.blacklist_specs,
-            priority_fn=self.priority_specs,
+            # Disabled the specs query and replaced it with WandB
+            # ).attach(
+            #     forward_fn=self.specs,
+            #     blacklist_fn=self.blacklist_specs,
+            #     priority_fn=self.priority_specs,
         )
 
         # Serve passes the axon information to the network + netuid we are hosting on.
@@ -326,19 +328,19 @@ def base_priority(self, synapse: typing.Union[Specs, Allocate, Challenge]) -> fl
         return priority
 
     # The blacklist function decides if a request should be ignored.
-    def blacklist_specs(self, synapse: Specs) -> typing.Tuple[bool, str]:
-        return self.base_blacklist(synapse)
+    # def blacklist_specs(self, synapse: Specs) -> typing.Tuple[bool, str]:
+    #     return self.base_blacklist(synapse)
 
     # The priority function determines the order in which requests are handled.
    # More valuable or higher-priority requests are processed before others.
-    def priority_specs(self, synapse: Specs) -> float:
-        return self.base_priority(synapse) + miner_priority_specs
+    # def priority_specs(self, synapse: Specs) -> float:
+    #     return self.base_priority(synapse) + miner_priority_specs
 
    # This is the PerfInfo function, which decides the miner's response to a valid, high-priority request.
-    def specs(self, synapse: Specs) -> Specs:
-        app_data = synapse.specs_input
-        synapse.specs_output = self.request_specs_processor.get_respond(app_data)
-        return synapse
+    # def specs(self, synapse: Specs) -> Specs:
+    #     app_data = synapse.specs_input
+    #     synapse.specs_output = self.request_specs_processor.get_respond(app_data)
+    #     return synapse
 
     # The blacklist function decides if a request should be ignored.
     def blacklist_allocate(self, synapse: Allocate) -> typing.Tuple[bool, str]:
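Note on the replacement path: this change removes the Specs axon endpoint, which previously accepted an arbitrary binary over the wire (app_data), wrote it to disk, marked it executable, and ran it, and relies on WandB for hardware reporting instead. The sketch below is only an illustration of what WandB-based spec reporting could look like; it calls the public wandb and psutil packages directly, and collect_specs, publish_specs, the "opencompute" project name, and the hotkey value are hypothetical placeholders, not the API of the repository's ComputeWandb wrapper (which this diff imports but does not show).

# Minimal sketch, assuming wandb and psutil are installed and WANDB_API_KEY is
# configured. All names below (collect_specs, publish_specs, the "opencompute"
# project, the hotkey) are illustrative placeholders, not the ComputeWandb API.
import platform

import psutil
import wandb


def collect_specs() -> dict:
    # Gather a small, JSON-serializable snapshot of the host hardware.
    return {
        "os": platform.platform(),
        "cpu_count": psutil.cpu_count(logical=True),
        "ram_total_gb": round(psutil.virtual_memory().total / 1024**3, 2),
        "disk_free_gb": round(psutil.disk_usage("/").free / 1024**3, 2),
    }


def publish_specs(hotkey: str) -> None:
    # Attach the snapshot to a WandB run so validators can read it remotely
    # instead of querying the miner's axon with a Specs synapse.
    run = wandb.init(project="opencompute", name=f"miner-{hotkey}", resume="allow")
    run.config.update({"specs": collect_specs()}, allow_val_change=True)
    run.finish()


if __name__ == "__main__":
    publish_specs(hotkey="miner-hotkey-placeholder")

Whatever the exact wrapper API, the design intent is the same: validators read specs from a shared WandB project rather than shipping an executable to the miner, which removes the remote-execution surface that RequestSpecsProcessor exposed.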