Merge pull request #175 from neuralinternet/dev
fix: address security vulnerability
userhasaccess authored Aug 6, 2024
2 parents a37d62e + ddc390b commit 04cb745
Showing 3 changed files with 80 additions and 78 deletions.
2 changes: 1 addition & 1 deletion compute/__init__.py
@@ -18,7 +18,7 @@
import string

# Define the version of the template module.
__version__ = "1.4.5"
__version__ = "1.4.6"
__minimal_miner_version__ = "1.4.5"
__minimal_validator_version__ = "1.4.5"

126 changes: 63 additions & 63 deletions neurons/Miner/specs.py
@@ -26,66 +26,66 @@
import bittensor as bt


class RequestSpecsProcessor:
def __init__(self):
self.request_queue = queue.Queue()
self.results_dict = {}
# Start the worker thread
threading.Thread(target=self.worker, daemon=True).start()

def worker(self):
while True:
# Get a request, its associated request_id, and event from the queue
app_data, request_id, done_event = self.request_queue.get()
try:
# Process the request
self.process_request(app_data, request_id)
finally:
# Mark the processed request as done
self.request_queue.task_done()
# Set the event to signal that the processing is complete
done_event.set()
time.sleep(1)

def process_request(self, app_data, request_id):
bt.logging.info(f"💻 Specs query started {request_id} ...")
try:
app_data = ast.literal_eval(app_data)

main_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(main_dir, f"app_{request_id}") # Use a unique file name

# Write the bytes data to a file
with open(file_path, "wb") as file:
file.write(app_data)
subprocess.run(f"chmod +x {file_path}", shell=True, check=True)
result = subprocess.check_output([file_path], shell=True, text=True)
except Exception as e:
traceback.print_exc()
result = {"process_request error": str(e)}
finally:
# Clean up the file after execution
if os.path.exists(file_path):
os.remove(file_path)

# Store the result in the shared dictionary
self.results_dict[request_id] = result

def get_respond(self, app_data):
try:
# Generate a unique identifier for the request
request_id = str(uuid.uuid4())
# Create an event that will be set when the request is processed
done_event = threading.Event()
# Add the request, request_id, and the event to the queue
bt.logging.info(f"💻 Specs query queuing {request_id} ...")
self.request_queue.put((app_data, request_id, done_event))
# Wait for the request to be processed
done_event.wait() # This will block until the event is set
# Retrieve the result from the results_dict
result = self.results_dict.pop(request_id) # Remove the result from the dictionary
bt.logging.info(f"💻 Specs query finalized {request_id} ...")
return result
except Exception as e:
traceback.print_exc()
return {"get_respond error": str(e)}
# class RequestSpecsProcessor:
# def __init__(self):
# self.request_queue = queue.Queue()
# self.results_dict = {}
# # Start the worker thread
# threading.Thread(target=self.worker, daemon=True).start()
#
# def worker(self):
# while True:
# # Get a request, its associated request_id, and event from the queue
# app_data, request_id, done_event = self.request_queue.get()
# try:
# # Process the request
# self.process_request(app_data, request_id)
# finally:
# # Mark the processed request as done
# self.request_queue.task_done()
# # Set the event to signal that the processing is complete
# done_event.set()
# time.sleep(1)
#
# def process_request(self, app_data, request_id):
# bt.logging.info(f"💻 Specs query started {request_id} ...")
# try:
# app_data = ast.literal_eval(app_data)
#
# main_dir = os.path.dirname(os.path.abspath(__file__))
# file_path = os.path.join(main_dir, f"app_{request_id}") # Use a unique file name
#
# # Write the bytes data to a file
# with open(file_path, "wb") as file:
# file.write(app_data)
# subprocess.run(f"chmod +x {file_path}", shell=True, check=True)
# result = subprocess.check_output([file_path], shell=True, text=True)
# except Exception as e:
# traceback.print_exc()
# result = {"process_request error": str(e)}
# finally:
# # Clean up the file after execution
# if os.path.exists(file_path):
# os.remove(file_path)
#
# # Store the result in the shared dictionary
# self.results_dict[request_id] = result
#
# def get_respond(self, app_data):
# try:
# # Generate a unique identifier for the request
# request_id = str(uuid.uuid4())
# # Create an event that will be set when the request is processed
# done_event = threading.Event()
# # Add the request, request_id, and the event to the queue
# bt.logging.info(f"💻 Specs query queuing {request_id} ...")
# self.request_queue.put((app_data, request_id, done_event))
# # Wait for the request to be processed
# done_event.wait() # This will block until the event is set
# # Retrieve the result from the results_dict
# result = self.results_dict.pop(request_id) # Remove the result from the dictionary
# bt.logging.info(f"💻 Specs query finalized {request_id} ...")
# return result
# except Exception as e:
# traceback.print_exc()
# return {"get_respond error": str(e)}
30 changes: 16 additions & 14 deletions neurons/miner.py
@@ -60,7 +60,7 @@
from compute.wandb.wandb import ComputeWandb
from neurons.Miner.allocate import check_allocation, register_allocation
from neurons.Miner.pow import check_cuda_availability, run_miner_pow
from neurons.Miner.specs import RequestSpecsProcessor
# from neurons.Miner.specs import RequestSpecsProcessor
from neurons.Validator.script import check_docker_availability


@@ -181,7 +181,8 @@ def __init__(self):
self.wandb.update_allocated(None)
bt.logging.info("Container is already running without allocated. Killing the container.")

self.request_specs_processor = RequestSpecsProcessor()
# Disable the Spec request and replaced with WanDB
# self.request_specs_processor = RequestSpecsProcessor()

self.last_updated_block = self.current_block - (self.current_block % 100)

@@ -198,10 +199,11 @@ def init_axon(self):
forward_fn=self.challenge,
blacklist_fn=self.blacklist_challenge,
priority_fn=self.priority_challenge,
).attach(
forward_fn=self.specs,
blacklist_fn=self.blacklist_specs,
priority_fn=self.priority_specs,
# Disable the spec query and replaced with WanDB
# ).attach(
# forward_fn=self.specs,
# blacklist_fn=self.blacklist_specs,
# priority_fn=self.priority_specs,
)

# Serve passes the axon information to the network + netuid we are hosting on.
@@ -326,19 +328,19 @@ def base_priority(self, synapse: typing.Union[Specs, Allocate, Challenge]) -> fl
return priority

# The blacklist function decides if a request should be ignored.
def blacklist_specs(self, synapse: Specs) -> typing.Tuple[bool, str]:
return self.base_blacklist(synapse)
# def blacklist_specs(self, synapse: Specs) -> typing.Tuple[bool, str]:

Review comments on this line:

Rapiiidooo (Contributor), Aug 6, 2024:
Are you sure about that? @thomas-chu123

Rapiiidooo (Contributor), Aug 6, 2024:
Never mind, it's not used anymore; I get it.

thomas-chu123 (Collaborator), Aug 7, 2024:
The Specs query function has now been replaced with WandB. We may still want to collect the specs remotely, so commenting the code out may be the faster solution, since the subprocess run with shell is where the security vulnerability lies.

# return self.base_blacklist(synapse)

# The priority function determines the order in which requests are handled.
# More valuable or higher-priority requests are processed before others.
def priority_specs(self, synapse: Specs) -> float:
return self.base_priority(synapse) + miner_priority_specs
# def priority_specs(self, synapse: Specs) -> float:
# return self.base_priority(synapse) + miner_priority_specs

# This is the PerfInfo function, which decides the miner's response to a valid, high-priority request.
def specs(self, synapse: Specs) -> Specs:
app_data = synapse.specs_input
synapse.specs_output = self.request_specs_processor.get_respond(app_data)
return synapse
# def specs(self, synapse: Specs) -> Specs:
# app_data = synapse.specs_input
# synapse.specs_output = self.request_specs_processor.get_respond(app_data)
# return synapse

# The blacklist function decides if a request should be ignored.
def blacklist_allocate(self, synapse: Allocate) -> typing.Tuple[bool, str]:
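
The review thread says the specs query has been replaced with WandB (the miner already imports ComputeWandb from compute.wandb.wandb). As a purely hypothetical sketch of that direction, not the repository's ComputeWandb implementation, a miner could gather its own hardware specs locally and publish them to a WandB run; psutil is an assumed dependency here, and the project name is a placeholder.

import platform

import psutil  # assumed dependency for CPU/RAM introspection
import wandb


def publish_specs_to_wandb(project: str = "example-compute-specs") -> dict:
    """Hypothetical sketch: report local hardware specs through a WandB run
    instead of executing a binary received over the network."""
    specs = {
        "os": f"{platform.system()} {platform.release()}",
        "cpu": platform.processor(),
        "cpu_count": psutil.cpu_count(logical=True),
        "ram_total_bytes": psutil.virtual_memory().total,
    }
    run = wandb.init(project=project, job_type="hardware-specs")
    run.config.update(specs)  # stored as run config so it can be read remotely
    run.finish()
    return specs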

0 comments on commit 04cb745
