diff --git a/README.md b/README.md
index b46f8f5ca..0d2c8cf58 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
:warning: **Use at your own risk** :warning:
-v7.2.9
+v7.2.10
## Overview
diff --git a/configs/template.json b/configs/template.json
index 6c0e3b4a3..db95271e1 100644
--- a/configs/template.json
+++ b/configs/template.json
@@ -1,7 +1,7 @@
{"backtest": {"base_dir": "backtests",
"compress_cache": true,
"end_date": "now",
- "exchange": "binance",
+ "exchanges": ["binance", "bybit"],
"start_date": "2021-05-01",
"starting_balance": 100000.0},
"bot": {"long": {"close_grid_markup_range": 0.0013425,
diff --git a/docs/configuration.md b/docs/configuration.md
index 144a7a1b8..30420c785 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -7,10 +7,10 @@ Here follows an overview of the parameters found in `config/template.json`.
- `base_dir`: Location to save backtest results.
- `compress_cache`: Set to true to save disk space; set to false to load faster.
- `end_date`: End date of backtest, e.g., 2024-06-23. Set to 'now' to use today's date as end date.
-- `exchange`: Exchange from which to fetch 1m OHLCV data. Default is Binance.
+- `exchanges`: Exchanges from which to fetch 1m OHLCV data for backtesting and optimizing.
- `start_date`: Start date of backtest.
- `starting_balance`: Starting balance in USD at the beginning of backtest.
-- `symbols`: Coins which were backtested. Note: coins for backtesting are live.approved_coins minus live.ignored_coins.
+- `symbols`: Coins backtested for each exchange. Note: coins for backtesting are live.approved_coins minus live.ignored_coins.
## Bot Settings
@@ -200,7 +200,7 @@ When optimizing, parameter values are within the lower and upper bounds.
### Optimization Limits
-The optimizer will penalize backtests whose metrics exceed the given values.
+The optimizer will penalize backtests whose metrics exceed the given values. If multiple exchanges are optimized, the worst value across exchanges is used for each limit.
- `lower_bound_drawdown_worst`: Worst (highest) drawdown during backtest.
- `lower_bound_equity_balance_diff_mean`: Mean of the difference between equity and balance. 
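Note on the config change above: `backtest.exchange` (a single string) becomes `backtest.exchanges` (a list), and `backtest.symbols` becomes a dict keyed by exchange name. Legacy configs keep working because `format_config` (see the `src/procedures.py` hunk below) rewrites the singular key on load. A minimal sketch of that migration and of the resulting shape; the symbol lists are placeholders, not real resolved markets:

```python
# Sketch of the backtest-section migration performed by format_config().
config = {"backtest": {"exchange": "binance", "start_date": "2021-05-01"}}

bt = config["backtest"]
if isinstance(bt.get("exchange"), str):
    bt["exchanges"] = [bt.pop("exchange")]  # "binance" -> ["binance"]

# After formatting, symbols are resolved per exchange
# (live.approved_coins minus live.ignored_coins):
bt["symbols"] = {
    "binance": ["BTC/USDT:USDT", "ETH/USDT:USDT"],
    "bybit": ["BTC/USDT:USDT"],
}
assert bt["exchanges"] == ["binance"]
```

Rewriting the old key on load rather than rejecting it avoids breaking existing configs and command-line workflows.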
diff --git a/src/backtest.py b/src/backtest.py index fd4a81ca4..8783d4cd6 100644 --- a/src/backtest.py +++ b/src/backtest.py @@ -142,15 +142,19 @@ def check_nested(d0, d1): return check_nested(dict0, dict1) -def get_cache_hash(config): - to_hash = {k: config["backtest"][k] for k in ["end_date", "exchange", "start_date", "symbols"]} - to_hash["end_date"] = format_end_date(to_hash["end_date"]) +def get_cache_hash(config, exchange): + to_hash = { + "symbols": config["backtest"]["symbols"][exchange], + "end_date": format_end_date(config["backtest"]["end_date"]), + "start_date": config["backtest"]["start_date"], + "exchange": exchange, + } to_hash["minimum_coin_age_days"] = config["live"]["minimum_coin_age_days"] return calc_hash(to_hash) -def load_symbols_hlcvs_from_cache(config): - cache_hash = get_cache_hash(config) +def load_symbols_hlcvs_from_cache(config, exchange): + cache_hash = get_cache_hash(config, exchange) cache_dir = Path("caches") / "hlcvs_data" / cache_hash[:16] if os.path.exists(cache_dir): symbols = json.load(open(cache_dir / "symbols.json")) @@ -166,8 +170,8 @@ def load_symbols_hlcvs_from_cache(config): return cache_dir, symbols, hlcvs -def save_symbols_hlcvs_to_cache(config, symbols, hlcvs): - cache_hash = get_cache_hash(config) +def save_symbols_hlcvs_to_cache(config, symbols, hlcvs, exchange): + cache_hash = get_cache_hash(config, exchange) cache_dir = Path("caches") / "hlcvs_data" / cache_hash[:16] cache_dir.mkdir(parents=True, exist_ok=True) if all([os.path.exists(cache_dir / x) for x in ["symbols.json", "hlcvs.npy"]]): @@ -201,10 +205,10 @@ def save_symbols_hlcvs_to_cache(config, symbols, hlcvs): return cache_dir -async def prepare_hlcvs_mss(config): +async def prepare_hlcvs_mss(config, exchange): results_path = oj( config["backtest"]["base_dir"], - config["backtest"]["exchange"], + exchange, "", ) mss_path = oj( @@ -213,7 +217,7 @@ async def prepare_hlcvs_mss(config): ) try: sts = utc_ms() - result = load_symbols_hlcvs_from_cache(config) + result = load_symbols_hlcvs_from_cache(config, exchange) if result: logging.info(f"Seconds to load cache: {(utc_ms() - sts) / 1000:.4f}") cache_dir, symbols, hlcvs = result @@ -223,7 +227,7 @@ async def prepare_hlcvs_mss(config): except: logging.info(f"Unable to load hlcvs data from cache. Fetching...") try: - mss = fetch_market_specific_settings_multi(exchange=config["backtest"]["exchange"]) + mss = fetch_market_specific_settings_multi(exchange=exchange) json.dump(mss, open(make_get_filepath(mss_path), "w")) except Exception as e: logging.error(f"failed to fetch market specific settings {e}") @@ -233,10 +237,10 @@ async def prepare_hlcvs_mss(config): except: raise Exception("failed to load market specific settings from cache") - symbols, timestamps, hlcvs = await prepare_hlcvs(config) + symbols, timestamps, hlcvs = await prepare_hlcvs(config, exchange) logging.info(f"Finished preparing hlcvs data. 
Shape: {hlcvs.shape}") try: - cache_dir = save_symbols_hlcvs_to_cache(config, symbols, hlcvs) + cache_dir = save_symbols_hlcvs_to_cache(config, symbols, hlcvs, exchange) except Exception as e: logging.error(f"failed to save hlcvs to cache {e}") traceback.print_exc() @@ -244,8 +248,8 @@ async def prepare_hlcvs_mss(config): return symbols, hlcvs, mss, results_path, cache_dir -def prep_backtest_args(config, mss, exchange_params=None, backtest_params=None): - symbols = sorted(set(config["backtest"]["symbols"])) # sort for consistency +def prep_backtest_args(config, mss, exchange, exchange_params=None, backtest_params=None): + symbols = sorted(set(config["backtest"]["symbols"][exchange])) # sort for consistency bot_params = {k: config["bot"][k].copy() for k in ["long", "short"]} for pside in bot_params: bot_params[pside]["wallet_exposure_limit"] = ( @@ -267,8 +271,8 @@ def prep_backtest_args(config, mss, exchange_params=None, backtest_params=None): return bot_params, exchange_params, backtest_params -def run_backtest(hlcvs, mss, config: dict): - bot_params, exchange_params, backtest_params = prep_backtest_args(config, mss) +def run_backtest(hlcvs, mss, config: dict, exchange: str): + bot_params, exchange_params, backtest_params = prep_backtest_args(config, mss, exchange) logging.info(f"Backtesting...") sts = utc_ms() @@ -286,11 +290,13 @@ def run_backtest(hlcvs, mss, config: dict): return fills, equities, analysis -def post_process(config, hlcvs, fills, equities, analysis, results_path): +def post_process(config, hlcvs, fills, equities, analysis, results_path, exchange): sts = utc_ms() fdf = process_forager_fills(fills) equities = pd.Series(equities) - analysis_py, bal_eq = analyze_fills_forager(config["backtest"]["symbols"], hlcvs, fdf, equities) + analysis_py, bal_eq = analyze_fills_forager( + config["backtest"]["symbols"][exchange], hlcvs, fdf, equities + ) for k in analysis_py: if k not in analysis: analysis[k] = analysis_py[k] @@ -305,7 +311,7 @@ def post_process(config, hlcvs, fills, equities, analysis, results_path): fdf.to_csv(f"{results_path}fills.csv") bal_eq.to_csv(oj(results_path, "balance_and_equity.csv")) if not config["disable_plotting"]: - plot_forager(results_path, config["backtest"]["symbols"], fdf, bal_eq, hlcvs) + plot_forager(results_path, config["backtest"]["symbols"][exchange], fdf, bal_eq, hlcvs) def plot_forager(results_path, symbols: [str], fdf: pd.DataFrame, bal_eq, hlcvs): @@ -363,11 +369,13 @@ async def main(): update_config_with_args(config, args) config = format_config(config) config["disable_plotting"] = args.disable_plotting - symbols, hlcvs, mss, results_path, cache_dir = await prepare_hlcvs_mss(config) - config["backtest"]["symbols"] = symbols - config["backtest"]["cache_dir"] = str(cache_dir) - fills, equities, analysis = run_backtest(hlcvs, mss, config) - post_process(config, hlcvs, fills, equities, analysis, results_path) + config["backtest"]["cache_dir"] = {} + for exchange in config["backtest"]["exchanges"]: + symbols, hlcvs, mss, results_path, cache_dir = await prepare_hlcvs_mss(config, exchange) + config["backtest"]["symbols"][exchange] = symbols + config["backtest"]["cache_dir"][exchange] = str(cache_dir) + fills, equities, analysis = run_backtest(hlcvs, mss, config, exchange) + post_process(config, hlcvs, fills, equities, analysis, results_path, exchange) if __name__ == "__main__": diff --git a/src/downloader.py b/src/downloader.py index e6a2fcef6..4439f12d4 100644 --- a/src/downloader.py +++ b/src/downloader.py @@ -423,12 +423,11 @@ async def 
load_hlcvs(symbol, start_date, end_date, exchange="binance"): return df[["timestamp", "high", "low", "close", "volume"]].values -async def prepare_hlcvs(config: dict): - symbols = sorted(set(config["backtest"]["symbols"])) +async def prepare_hlcvs(config: dict, exchange: str): + symbols = sorted(set(config["backtest"]["symbols"][exchange])) start_date = config["backtest"]["start_date"] end_date = format_end_date(config["backtest"]["end_date"]) end_ts = date_to_ts2(end_date) - exchange = config["backtest"]["exchange"] minimum_coin_age_days = config["live"]["minimum_coin_age_days"] interval_ms = 60000 @@ -660,17 +659,18 @@ async def main(): config = load_config(args.config_path) update_config_with_args(config, args) config = format_config(config) - for symbol in config["backtest"]["symbols"]: - try: - data = await load_hlcvs( - symbol, - config["backtest"]["start_date"], - config["backtest"]["end_date"], - exchange=config["backtest"]["exchange"], - ) - except Exception as e: - logging.error(f"Error with {symbol} {e}") - traceback.print_exc() + for exchange in config["backtest"]["exchanges"]: + for symbol in config["backtest"]["symbols"][exchange]: + try: + data = await load_hlcvs( + symbol, + config["backtest"]["start_date"], + config["backtest"]["end_date"], + exchange=exchange, + ) + except Exception as e: + logging.error(f"Error with {symbol} {e}") + traceback.print_exc() if __name__ == "__main__": diff --git a/src/exchanges/binance.py b/src/exchanges/binance.py index 8077a42fa..8aaa4efc7 100644 --- a/src/exchanges/binance.py +++ b/src/exchanges/binance.py @@ -84,9 +84,9 @@ async def print_new_user_suggestion(self): print(front_pad + "#" * (max_len + 2) + back_pad) print("\n\n") - async def hourly_cycle(self, verbose=True): + async def init_markets(self, verbose=True): await self.print_new_user_suggestion() - await super().hourly_cycle(verbose=verbose) + await super().init_markets(verbose=verbose) def set_market_specific_settings(self): super().set_market_specific_settings() diff --git a/src/optimize.py b/src/optimize.py index 0b1116f4f..bddfc2efe 100644 --- a/src/optimize.py +++ b/src/optimize.py @@ -101,6 +101,7 @@ def create_shared_memory_file(hlcvs): temp_file = tempfile.NamedTemporaryFile(delete=False) logging.info(f"Creating shared memory file: {temp_file.name}...") shared_memory_file = temp_file.name + temp_file.close() try: total_size = hlcvs.nbytes @@ -176,7 +177,7 @@ def mutPolynomialBoundedWrapper(individual, eta, low, up, indpb): def cxSimulatedBinaryBoundedWrapper(ind1, ind2, eta, low, up): """ A wrapper around DEAP's cxSimulatedBinaryBounded function to pre-process - bounds and handle the case where lower and upper bounds are equal. + bounds and handle the case where lower and upper bounds may be equal. Args: ind1: The first individual participating in the crossover. 
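For the `Evaluator` rewrite in the next hunk: each candidate config is now backtested once per exchange, and the per-exchange analyses are collapsed into one dict with `_mean`/`_min`/`_max`/`_std` suffixes; the limit penalties read the `_max` (worst-exchange) values, the scoring keys the `_mean` values. A self-contained sketch of that aggregation, with illustrative numbers:

```python
import numpy as np

def combine_analyses(analyses: dict) -> dict:
    # Collapse {exchange: {metric: value}} into suffixed summary metrics.
    combined = {}
    keys = analyses[next(iter(analyses))].keys()
    for key in keys:
        values = [analysis[key] for analysis in analyses.values()]
        combined[f"{key}_mean"] = np.mean(values)
        combined[f"{key}_min"] = np.min(values)
        combined[f"{key}_max"] = np.max(values)
        combined[f"{key}_std"] = np.std(values)
    return combined

combined = combine_analyses({
    "binance": {"drawdown_worst": 0.30},
    "bybit": {"drawdown_worst": 0.45},
})
print(combined["drawdown_worst_max"])  # 0.45: the worst exchange drives the penalty
```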
@@ -258,42 +259,82 @@ def managed_mmap(filename, dtype, shape): class Evaluator: - def __init__(self, shared_memory_file, hlcvs_shape, hlcvs_dtype, config, mss, results_queue): + def __init__(self, shared_memory_files, hlcvs_shapes, hlcvs_dtypes, config, msss, results_queue): logging.info("Initializing Evaluator...") - self.shared_memory_file = shared_memory_file - self.hlcvs_shape = hlcvs_shape - self.hlcvs_dtype = hlcvs_dtype - - logging.info("Setting up managed_mmap...") - self.mmap_context = managed_mmap(self.shared_memory_file, self.hlcvs_dtype, self.hlcvs_shape) - logging.info("Entering mmap_context...") - self.shared_hlcvs_np = self.mmap_context.__enter__() - logging.info("mmap_context entered successfully.") + self.shared_memory_files = shared_memory_files + self.hlcvs_shapes = hlcvs_shapes + self.hlcvs_dtypes = hlcvs_dtypes + self.msss = msss + self.exchanges = list(shared_memory_files.keys()) + + self.mmap_contexts = {} + self.shared_hlcvs_np = {} + self.exchange_params = {} + self.backtest_params = {} + for exchange in self.exchanges: + logging.info(f"Setting up managed_mmap for {exchange}...") + self.mmap_contexts[exchange] = managed_mmap( + self.shared_memory_files[exchange], + self.hlcvs_dtypes[exchange], + self.hlcvs_shapes[exchange], + ) + self.shared_hlcvs_np[exchange] = self.mmap_contexts[exchange].__enter__() + _, self.exchange_params[exchange], self.backtest_params[exchange] = prep_backtest_args( + config, self.msss[exchange], exchange + ) + logging.info(f"mmap_context entered successfully for {exchange}.") self.config = config - _, self.exchange_params, self.backtest_params = prep_backtest_args(config, mss) logging.info("Evaluator initialization complete.") self.results_queue = results_queue def evaluate(self, individual): config = individual_to_config(individual, template=self.config) - bot_params, _, _ = prep_backtest_args( - config, [], exchange_params=self.exchange_params, backtest_params=self.backtest_params - ) - fills, equities, analysis = pbr.run_backtest( - self.shared_memory_file, - self.shared_hlcvs_np.shape, - self.shared_hlcvs_np.dtype.str, - bot_params, - self.exchange_params, - self.backtest_params, - ) - w_0, w_1 = self.calc_fitness(analysis) - analysis.update({"w_0": w_0, "w_1": w_1}) - self.results_queue.put({"analysis": analysis, "config": config}) + analyses = {} + for exchange in self.exchanges: + bot_params, _, _ = prep_backtest_args( + config, + [], + exchange, + exchange_params=self.exchange_params[exchange], + backtest_params=self.backtest_params[exchange], + ) + fills, equities, analysis = pbr.run_backtest( + self.shared_memory_files[exchange], + self.shared_hlcvs_np[exchange].shape, + self.shared_hlcvs_np[exchange].dtype.str, + bot_params, + self.exchange_params[exchange], + self.backtest_params[exchange], + ) + analyses[exchange] = analysis + + analyses_combined = self.combine_analyses(analyses) + w_0, w_1 = self.calc_fitness(analyses_combined) + analyses_combined.update({"w_0": w_0, "w_1": w_1}) + + data = { + **config, + **{ + "analyses_combined": analyses_combined, + "analyses": analyses, + }, + } + self.results_queue.put(data) return w_0, w_1 - def calc_fitness(self, analysis): + def combine_analyses(self, analyses): + analyses_combined = {} + keys = analyses[next(iter(analyses))].keys() + for key in keys: + values = [analysis[key] for analysis in analyses.values()] + analyses_combined[f"{key}_mean"] = np.mean(values) + analyses_combined[f"{key}_min"] = np.min(values) + analyses_combined[f"{key}_max"] = np.max(values) + 
analyses_combined[f"{key}_std"] = np.std(values) + return analyses_combined + + def calc_fitness(self, analyses_combined): modifier = 0.0 for i, key in [ (5, "drawdown_worst"), @@ -302,33 +343,51 @@ def calc_fitness(self, analysis): (2, "loss_profit_ratio"), ]: modifier += ( - max(self.config["optimize"]["limits"][f"lower_bound_{key}"], analysis[key]) + max( + self.config["optimize"]["limits"][f"lower_bound_{key}"], + analyses_combined[f"{key}_max"], + ) - self.config["optimize"]["limits"][f"lower_bound_{key}"] ) * 10**i - if analysis["drawdown_worst"] >= 1.0 or analysis["equity_balance_diff_max"] < 0.1: + if ( + analyses_combined["drawdown_worst_max"] >= 1.0 + or analyses_combined["equity_balance_diff_max_max"] >= 1.0 + ): w_0 = w_1 = modifier else: - w_0 = modifier - analysis[self.config["optimize"]["scoring"][0]] - w_1 = modifier - analysis[self.config["optimize"]["scoring"][1]] + scoring_key_0 = f"{self.config['optimize']['scoring'][0]}_mean" + scoring_key_1 = f"{self.config['optimize']['scoring'][1]}_mean" + w_0 = modifier - analyses_combined[scoring_key_0] + w_1 = modifier - analyses_combined[scoring_key_1] return w_0, w_1 def __del__(self): - if hasattr(self, "mmap_context"): - self.mmap_context.__exit__(None, None, None) + if hasattr(self, "mmap_contexts"): + for mmap_context in self.mmap_contexts.values(): + mmap_context.__exit__(None, None, None) def __getstate__(self): - # This method is called when pickling. We exclude mmap_context and shared_hlcvs_np + # This method is called when pickling. We exclude mmap_contexts and shared_hlcvs_np state = self.__dict__.copy() - del state["mmap_context"] + del state["mmap_contexts"] del state["shared_hlcvs_np"] return state def __setstate__(self, state): self.__dict__.update(state) - self.mmap_context = managed_mmap(self.shared_memory_file, self.hlcvs_dtype, self.hlcvs_shape) - self.shared_hlcvs_np = self.mmap_context.__enter__() - if self.shared_hlcvs_np is None: - print("Warning: Unable to recreate shared memory mapping during unpickling.") + self.mmap_contexts = {} + self.shared_hlcvs_np = {} + for exchange in self.exchanges: + self.mmap_contexts[exchange] = managed_mmap( + self.shared_memory_files[exchange], + self.hlcvs_dtypes[exchange], + self.hlcvs_shapes[exchange], + ) + self.shared_hlcvs_np[exchange] = self.mmap_contexts[exchange].__enter__() + if self.shared_hlcvs_np[exchange] is None: + print( + f"Warning: Unable to recreate shared memory mapping during unpickling for {exchange}." 
+ ) def add_extra_options(parser): @@ -431,23 +490,36 @@ async def main(): old_config = deepcopy(config) update_config_with_args(config, args) config = format_config(config) - symbols, hlcvs, mss, results_path, cache_dir = await prepare_hlcvs_mss(config) - config["backtest"]["symbols"] = symbols - config["backtest"]["cache_dir"] = str(cache_dir) + exchanges = config["backtest"]["exchanges"] date_fname = ts_to_date_utc(utc_ms())[:19].replace(":", "_") - coins = [symbol_to_coin(s) for s in config["backtest"]["symbols"]] + coins = sorted( + set([symbol_to_coin(x) for y in config["backtest"]["symbols"].values() for x in y]) + ) coins_fname = "_".join(coins) if len(coins) <= 6 else f"{len(coins)}_coins" hash_snippet = uuid4().hex[:8] config["results_filename"] = make_get_filepath( - f"optimize_results/{date_fname}_{coins_fname}_{hash_snippet}_all_results.txt" + f"optimize_results/{date_fname}_{'_'.join(exchanges)}_{coins_fname}_{hash_snippet}_all_results.txt" ) try: - required_space = hlcvs.nbytes * 1.1 # Add 10% buffer - check_disk_space(tempfile.gettempdir(), required_space) - logging.info(f"Starting to create shared memory file...") - shared_memory_file = create_shared_memory_file(hlcvs) - logging.info(f"Finished creating shared memory file: {shared_memory_file}") + # Prepare data for each exchange + hlcvs_dict = {} + shared_memory_files = {} + hlcvs_shapes = {} + hlcvs_dtypes = {} + msss = {} + for exchange in exchanges: + symbols, hlcvs, mss, results_path, cache_dir = await prepare_hlcvs_mss(config, exchange) + hlcvs_dict[exchange] = hlcvs + hlcvs_shapes[exchange] = hlcvs.shape + hlcvs_dtypes[exchange] = hlcvs.dtype + msss[exchange] = mss + required_space = hlcvs.nbytes * 1.1 # Add 10% buffer + check_disk_space(tempfile.gettempdir(), required_space) + logging.info(f"Starting to create shared memory file for {exchange}...") + shared_memory_file = create_shared_memory_file(hlcvs) + shared_memory_files[exchange] = shared_memory_file + logging.info(f"Finished creating shared memory file for {exchange}: {shared_memory_file}") # Create results queue and start manager process manager = multiprocessing.Manager() @@ -461,7 +533,7 @@ async def main(): # Initialize evaluator with results queue evaluator = Evaluator( - shared_memory_file, hlcvs.shape, hlcvs.dtype, config, mss, results_queue + shared_memory_files, hlcvs_shapes, hlcvs_dtypes, config, msss, results_queue ) logging.info(f"Finished initializing evaluator...") @@ -596,12 +668,14 @@ def create_individual(): pool.terminate() pool.join() - if shared_memory_file and os.path.exists(shared_memory_file): - logging.info(f"Removing shared memory file: {shared_memory_file}") - try: - os.unlink(shared_memory_file) - except Exception as e: - logging.error(f"Error removing shared memory file: {e}") + # Remove shared memory files + for shared_memory_file in shared_memory_files.values(): + if shared_memory_file and os.path.exists(shared_memory_file): + logging.info(f"Removing shared memory file: {shared_memory_file}") + try: + os.unlink(shared_memory_file) + except Exception as e: + logging.error(f"Error removing shared memory file: {e}") logging.info("Cleanup complete. 
Exiting.") sys.exit(0) diff --git a/src/passivbot.py b/src/passivbot.py index ec3fac4da..8734ad1ee 100644 --- a/src/passivbot.py +++ b/src/passivbot.py @@ -296,14 +296,16 @@ def coin_to_symbol(self, coin): self.coin_to_symbol_map = {} if coin in self.coin_to_symbol_map: return self.coin_to_symbol_map[coin] + coinf = symbol_to_coin(coin) + if coinf in self.coin_to_symbol_map: + self.coin_to_symbol_map[coin] = self.coin_to_symbol_map[coinf] + return self.coin_to_symbol_map[coinf] result = coin_to_symbol( coin, eligible_symbols=self.eligible_symbols, - coin_to_symbol_map=self.coin_to_symbol_map, quote=self.quote, ) - if result == "": - self.coin_to_symbol_map[coin] = "" + self.coin_to_symbol_map[coin] = result return result async def run_execution_loop(self): diff --git a/src/procedures.py b/src/procedures.py index 55fc93191..1c3151469 100644 --- a/src/procedures.py +++ b/src/procedures.py @@ -132,9 +132,7 @@ def format_config(config: dict, verbose=True, live_only=False) -> dict: result = template elif all([k in config for k in template]): result = deepcopy(config) - elif all([k in config for k in ["analysis", "config"]]) and all( - [k in config["config"] for k in template] - ): + elif "config" in config and all([k in config["config"] for k in template]): result = deepcopy(config["config"]) elif "bot" in config and "live" in config: # live only config @@ -184,6 +182,13 @@ def format_config(config: dict, verbose=True, live_only=False) -> dict: if verbose: print(f"renaming parameter {k0} {src}: {dst}") del result[k0][src] + if "exchange" in result["backtest"] and isinstance(result["backtest"]["exchange"], str): + result["backtest"]["exchanges"] = [result["backtest"]["exchange"]] + if verbose: + print( + f"changed backtest.exchange: {result['backtest']['exchange']} -> backtest.exchanges: [{result['backtest']['exchange']}]" + ) + del result["backtest"]["exchange"] for k0 in template: for k1 in template[k0]: if k0 not in result: @@ -218,31 +223,36 @@ def format_config(config: dict, verbose=True, live_only=False) -> dict: "long": deepcopy(result["live"][k_coins]), "short": deepcopy(result["live"][k_coins]), } - eligible_symbols = get_all_eligible_symbols(result["backtest"]["exchange"]) - ignored_coins = coins_to_symbols( - set(flatten(result["live"]["ignored_coins"].values())), - eligible_symbols=eligible_symbols, - verbose=verbose, - ) - approved_coins = coins_to_symbols( - set(flatten(result["live"]["approved_coins"].values())), - eligible_symbols=eligible_symbols, - verbose=verbose, - ) - if approved_coins: - result["backtest"]["symbols"] = [ - x - for x in coins_to_symbols( - sorted(approved_coins), exchange=result["backtest"]["exchange"], verbose=verbose - ) - if x not in ignored_coins - ] - else: - result["backtest"]["symbols"] = [ - s - for s in sorted(get_all_eligible_symbols(result["backtest"]["exchange"])) - if s not in ignored_coins - ] + result["backtest"]["symbols"] = {} + for exchange in result["backtest"]["exchanges"]: + eligible_symbols = get_all_eligible_symbols(exchange) + ignored_coins = coins_to_symbols( + set(flatten(result["live"]["ignored_coins"].values())), + eligible_symbols=eligible_symbols, + exchange=exchange, + verbose=verbose, + ) + approved_coins = coins_to_symbols( + set(flatten(result["live"]["approved_coins"].values())), + eligible_symbols=eligible_symbols, + exchange=exchange, + verbose=verbose, + ) + if approved_coins: + result["backtest"]["symbols"][exchange] = [ + x + for x in coins_to_symbols( + sorted(approved_coins), + eligible_symbols=eligible_symbols, + 
exchange=exchange, + verbose=verbose, + ) + if x not in ignored_coins + ] + else: + result["backtest"]["symbols"][exchange] = [ + s for s in sorted(get_all_eligible_symbols(exchange)) if s not in ignored_coins + ] result["backtest"]["end_date"] = format_end_date(result["backtest"]["end_date"]) return result @@ -291,49 +301,26 @@ def get_all_eligible_symbols(exchange="binance"): raise Exception("unable to fetch or load from cache") -def coin_to_symbol(coin, eligible_symbols=None, coin_to_symbol_map={}, quote="USDT", verbose=True): - # side effect: coin_to_symbol_map might get mutated +def coin_to_symbol(coin, eligible_symbols=None, quote="USDT", verbose=True): if eligible_symbols is None: eligible_symbols = get_all_eligible_symbols() - if coin in coin_to_symbol_map: - return coin_to_symbol_map[coin] + # first check if there is a single match + candidates = {s for s in eligible_symbols if coin in s} + if len(candidates) == 1: + return next(iter(candidates)) - # first check if coin/quote:quote has a match + # next check if coin/quote:quote has a match candidate_symbol = f"{coin}/{quote}:{quote}" if candidate_symbol in eligible_symbols: - coin_to_symbol_map[coin] = candidate_symbol return candidate_symbol - # next check if there is a single match - candidates = {s for s in eligible_symbols if coin in s} - if len(candidates) == 1: - coin_to_symbol_map[coin] = next(iter(candidates)) - return coin_to_symbol_map[coin] - # next format coin (e.g. 1000SHIB -> SHIB, kPEPE -> PEPE, etc) coinf = symbol_to_coin(coin) - if coin in coin_to_symbol_map: - coin_to_symbol_map[coin] = coin_to_symbol_map[coinf] - return coin_to_symbol_map[coin] - - # first check if coinf/quote:quote has a match - candidate_symbol = f"{coinf}/{quote}:{quote}" - if candidate_symbol in eligible_symbols: - coin_to_symbol_map[coin] = candidate_symbol - return candidate_symbol - - # next check if there is a single match - candidates = {s for s in eligible_symbols if coinf in s} - if len(candidates) == 1: - coin_to_symbol_map[coin] = next(iter(candidates)) - return coin_to_symbol_map[coin] - # next check if multiple matches if len(candidates) > 1: for candidate in candidates: candidate_coin = symbol_to_coin(candidate) if candidate_coin == coinf: - coin_to_symbol_map[coin] = candidate return candidate if verbose: print(f"coin_to_symbol {coinf}: ambiguous coin, multiple candidates {candidates}") @@ -343,32 +330,10 @@ def coin_to_symbol(coin, eligible_symbols=None, coin_to_symbol_map={}, quote="US return "" -def coin_to_symbol_old(coin: str, eligible_symbols=None, verbose=True): - # formats coin to appropriate symbol - if eligible_symbols is None: - eligible_symbols = get_all_eligible_symbols() - coin = symbol_to_coin(coin) - candidates = [x for x in eligible_symbols if coin in x] - if len(candidates) == 1: - return candidates[0] - if len(candidates) == 0: - if verbose: - print(f"no candidate symbol found for {coin}") - return None - for x in candidates: - if x.replace("USDT", "") == coin: - return x - if coin == "": - return None - if verbose: - print(f"ambiguous coin: {coin}, candidates: {candidates}") - return None - - def coins_to_symbols(coins: [str], eligible_symbols=None, exchange=None, verbose=True): if eligible_symbols is None: eligible_symbols = get_all_eligible_symbols(exchange) - symbols = [coin_to_symbol(x, eligible_symbols, verbose=verbose) for x in coins] + symbols = [coin_to_symbol(x, eligible_symbols=eligible_symbols, verbose=verbose) for x in coins] return sorted(set([x for x in symbols if x])) @@ -388,6 +353,7 @@ def 
load_config(filepath: str, live_only=False, verbose=True) -> dict: config = format_config(config, live_only=live_only, verbose=verbose) return config except Exception as e: + traceback.print_exc() raise Exception(f"failed to load config {filepath}: {e}") @@ -1416,8 +1382,9 @@ def add_arguments_recursively(parser, config, prefix="", acronyms=set()): elif "approved_coins" in full_name: acronym = "s" type_ = comma_separated_values - elif "ignored_coins" in full_name: + elif any([x in full_name for x in ["ignored_coins", "exchanges"]]): type_ = comma_separated_values + appendix = "item1,item2,item3,..." elif "optimize_scoring" in full_name: type_ = comma_separated_values acronym = "os" diff --git a/src/pure_funcs.py b/src/pure_funcs.py index 00197cc7c..f1565e841 100644 --- a/src/pure_funcs.py +++ b/src/pure_funcs.py @@ -508,7 +508,7 @@ def get_template_live_config(passivbot_mode="neat_grid"): "base_dir": "backtests", "compress_cache": True, "end_date": "now", - "exchange": "binance", + "exchanges": ["binance", "bybit"], "start_date": "2021-05-01", "starting_balance": 100000.0, }, diff --git a/src/tools/extract_best_config.py b/src/tools/extract_best_config.py index b3aea20d6..81904e3d5 100644 --- a/src/tools/extract_best_config.py +++ b/src/tools/extract_best_config.py @@ -142,15 +142,32 @@ def process_single(file_location, verbose=False): for x in data_generator(file_location, verbose=verbose): if x: xs.append(x) + if not xs: + print_(f"No valid data found in {file_location}") + return None print_("Processing...") res = pd.DataFrame([flatten_dict(x) for x in xs]) + # Determine the prefix based on the data + if "analyses_combined" in xs[0]: + analysis_prefix = "analyses_combined_" + analysis_key = "analyses_combined" + elif "analysis" in xs[0]: + analysis_prefix = "analysis_" + analysis_key = "analysis" + else: + raise Exception("Neither 'analyses_combined' nor 'analysis' found in data") + keys, higher_is_better = ["w_0", "w_1"], [False, False] - keys = ["analysis_" + key for key in keys] - candidates = res[(res.analysis_w_0 <= 0.0) & (res.analysis_w_1 <= 0.0)][keys] + keys = [analysis_prefix + key for key in keys] + print_("n backtests", len(res)) + + # Adjust the filtering condition based on the prefix + res_keys_w_0 = res[analysis_prefix + "w_0"] + res_keys_w_1 = res[analysis_prefix + "w_1"] + candidates = res[(res_keys_w_0 <= 0.0) & (res_keys_w_1 <= 0.0)][keys] if len(candidates) == 0: candidates = res[keys] - print_("n backtests", len(res)) print_("n candidates", len(candidates)) if len(candidates) == 1: best = candidates.iloc[0].name @@ -172,15 +189,19 @@ def process_single(file_location, verbose=False): print_("best") print_(candidates.loc[best]) print_("pareto front:") - res_to_print = res[[x for x in res.columns if "analysis" in x]].loc[closest_to_ideal.index] - res_to_print.columns = [x.replace("analysis_", "") for x in res_to_print.columns] + res_to_print = res[[x for x in res.columns if analysis_prefix[:-1] in x]].loc[ + closest_to_ideal.index + ] + res_to_print.columns = [x.replace(analysis_prefix, "") for x in res_to_print.columns] print_(res_to_print) # Processing the best result for configuration best_d = xs[best] - best_d["analysis"]["n_iters"] = len(xs) - best_d.update(deepcopy(best_d["config"])) - del best_d["config"] + # Adjust for 'analysis' or 'analyses_combined' + best_d[analysis_key]["n_iters"] = len(xs) + if "config" in best_d: + best_d.update(deepcopy(best_d["config"])) + del best_d["config"] fjson = config_pretty_str(best_d) print_(fjson) coins = [s.replace("USDT", 
"") for s in best_d["backtest"]["symbols"]] @@ -189,6 +210,10 @@ def process_single(file_location, verbose=False): base_path = os.path.split(full_path)[0] full_path = make_get_filepath(full_path.replace(base_path, base_path + "_analysis/")) pareto_to_dump = [x for i, x in enumerate(xs) if i in pareto.index] + for i in range(len(pareto_to_dump)): + if "config" in pareto_to_dump[i]: + pareto_to_dump[i].update(deepcopy(pareto_to_dump[i]["config"])) + del pareto_to_dump[i]["config"] with open(full_path.replace(".json", "_pareto.txt"), "w") as f: for x in pareto_to_dump: f.write(json.dumps(x) + "\n")