Merge pull request #419 from enarjord/v6.0.1_slices
v6.0.1 slices
enarjord authored Oct 29, 2023
2 parents 91733df + e8e0d57 commit 7a57883
Showing 10 changed files with 144 additions and 63 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -4,7 +4,7 @@

:warning: **Use at own risk** :warning:

v6.0.0
v6.0.1


## Overview
19 changes: 11 additions & 8 deletions configs/optimize/default.hjson
@@ -17,6 +17,9 @@
n_cpus: 6
iters: 4000

# to reduce overfitting, perform backtest with multiple start dates, taking mean of metrics as final analysis
n_backtest_slices: 10

# score = adg per exposure weighted according to adg subdivisions
# (see configs/backtest/default.hjson)

@@ -96,7 +99,7 @@
n_close_orders: [2, 16]
auto_unstuck_wallet_exposure_threshold: [0.1, 0.9]
auto_unstuck_ema_dist: [-0.1, 0.003]
auto_unstuck_delay_minutes: [120.0, 1440.0]
auto_unstuck_delay_minutes: [1.0, 1440.0]
auto_unstuck_qty_pct: [0.01, 0.1]
}
short:
@@ -114,7 +117,7 @@
n_close_orders: [2, 16]
auto_unstuck_wallet_exposure_threshold: [0.1, 0.9]
auto_unstuck_ema_dist: [-0.1, 0.003]
auto_unstuck_delay_minutes: [120.0, 1440.0]
auto_unstuck_delay_minutes: [1.0, 1440.0]
auto_unstuck_qty_pct: [0.01, 0.1]
}
}
@@ -136,7 +139,7 @@
n_close_orders: [2, 16]
auto_unstuck_wallet_exposure_threshold: [0.1, 0.9]
auto_unstuck_ema_dist: [-0.1, 0.003]
auto_unstuck_delay_minutes: [120.0, 1440.0]
auto_unstuck_delay_minutes: [1.0, 1440.0]
auto_unstuck_qty_pct: [0.01, 0.1]
}
short:
@@ -155,7 +158,7 @@
n_close_orders: [2, 16]
auto_unstuck_wallet_exposure_threshold: [0.1, 0.9]
auto_unstuck_ema_dist: [-0.1, 0.003]
auto_unstuck_delay_minutes: [120.0, 1440.0]
auto_unstuck_delay_minutes: [1.0, 1440.0]
auto_unstuck_qty_pct: [0.01, 0.1]
}
}
@@ -167,9 +170,9 @@
ema_span_1: [5.0, 1440.0]
ema_dist_entry: [-0.05, 0.003]
ema_dist_close: [-0.01, 0.003]
qty_pct_entry: [0.01, 0.1]
qty_pct_entry: [0.01, 0.05]
qty_pct_close: [0.01, 0.05]
we_multiplier_entry: [0.0, 50.0]
we_multiplier_entry: [0.0, 10.0]
we_multiplier_close: [0.0, 0.0]
delay_weight_entry: [0.0, 100.0]
delay_weight_close: [0.0, 100.0]
@@ -186,9 +189,9 @@
ema_span_1: [5.0, 1440.0]
ema_dist_entry: [-0.05, 0.003]
ema_dist_close: [-0.01, 0.003]
qty_pct_entry: [0.01, 0.1]
qty_pct_entry: [0.01, 0.05]
qty_pct_close: [0.01, 0.05]
we_multiplier_entry: [0.0, 50.0]
we_multiplier_entry: [0.0, 10.0]
we_multiplier_close: [0.0, 0.0]
delay_weight_entry: [0.0, 100.0]
delay_weight_close: [0.0, 100.0]
10 changes: 8 additions & 2 deletions exchanges/bybit.py
@@ -458,7 +458,10 @@ async def init_exchange_config(self):
)
logging.info(f"cross mode set {res}")
except Exception as e:
logging.error(f"error setting cross mode: {e}")
if "margin mode is not modified" in str(e):
logging.info(str(e))
else:
logging.error(f"error setting cross mode: {e}")
try:
res = await self.cc.set_position_mode(hedged=True)
logging.info(f"hedge mode set {res}")
@@ -468,4 +471,7 @@
res = await self.cc.set_leverage(int(self.leverage), symbol=self.symbol)
logging.info(f"leverage set {res}")
except Exception as e:
logging.error(f"error setting leverage: {e}")
if "leverage not modified" in str(e):
logging.info(str(e))
else:
logging.error(f"error setting leverage: {e}")
2 changes: 1 addition & 1 deletion exchanges/kucoin.py
@@ -475,7 +475,7 @@ async def fetch_fills(
async def fetch_latest_fills(self):
fetched = None
try:
fetched = await self.private_get(self.endpoints["recent_orders"])
fetched = await self.private_get(self.endpoints["recent_orders"], {"symbol": self.symbol})
return [
{
"order_id": elm["id"],
8 changes: 5 additions & 3 deletions forager.py
@@ -127,9 +127,11 @@ def generate_yaml(
shorts_on_gs = [sym for sym in current_positions_short if sym not in active_shorts]

if config["graceful_stop"]:
longs_on_gs = sorted(set(longs_on_gs + active_longs))
longs_on_gs = current_positions_long
lw = round(twe_long / len(longs_on_gs), 4) if len(longs_on_gs) > 0 else 0.1
active_longs = []
shorts_on_gs = sorted(set(shorts_on_gs + active_shorts))
shorts_on_gs = current_positions_short
sw = round(twe_short / len(shorts_on_gs), 4) if len(shorts_on_gs) > 0 else 0.1
active_shorts = []

print("ideal_longs", sorted(ideal_longs))
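A quick sketch (not part of the diff) of the wallet-exposure arithmetic above: on graceful stop, each bot with an open position keeps WE_limit = TWE / n_bots. Numbers below are hypothetical:

twe_long = 1.5  # total wallet exposure limit, long side
current_positions_long = ["BTCUSDT", "ETHUSDT", "XRPUSDT"]  # symbols with open long positions
longs_on_gs = current_positions_long
lw = round(twe_long / len(longs_on_gs), 4) if len(longs_on_gs) > 0 else 0.1
# lw == 0.5: per-bot wallet exposure limit while on graceful stop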
@@ -457,7 +459,7 @@ async def main():
"--graceful_stop",
"--graceful-stop",
dest="graceful_stop",
help="set all bots to graceful stop",
help="set all bots to graceful stop; WE_limit = TWE / n_bots",
action="store_true",
)
args = parser.parse_args()
22 changes: 2 additions & 20 deletions harmony_search.py
@@ -249,16 +249,7 @@ def start_new_harmony(self, wi: int):
"long": deepcopy(template["long"]),
"short": deepcopy(template["short"]),
},
**{
k: self.config[k]
for k in [
"starting_balance",
"latency_simulation_ms",
"market_type",
"adg_n_subdivisions",
"slim_analysis",
]
},
**{k: self.config[k] for k in self.config["keys_to_include"]},
**{"symbol": self.symbols[0], "config_no": self.iter_counter},
}
for side in ["long", "short"]:
@@ -324,16 +315,7 @@ def start_new_initial_eval(self, wi: int, hm_key: str):
"long": deepcopy(self.hm[hm_key]["long"]["config"]),
"short": deepcopy(self.hm[hm_key]["short"]["config"]),
},
**{
k: self.config[k]
for k in [
"starting_balance",
"latency_simulation_ms",
"market_type",
"adg_n_subdivisions",
"slim_analysis",
]
},
**{k: self.config[k] for k in self.config["keys_to_include"]},
**{"symbol": self.symbols[0], "initial_eval_key": hm_key, "config_no": self.iter_counter},
}
line = f"starting new initial eval {config['config_no']} of {self.n_harmonies} "
4 changes: 2 additions & 2 deletions njit_funcs.py
@@ -759,7 +759,7 @@ def calc_close_grid_frontwards_long(
close_prices[0],
)
if auto_unstuck_close[0] != 0.0:
psize_ = round_(psize_ - auto_unstuck_close[0], qty_step)
psize_ = round_(psize_ - abs(auto_unstuck_close[0]), qty_step)
if psize_ < calc_min_entry_qty(
auto_unstuck_close[1], inverse, qty_step, min_qty, min_cost
):
@@ -951,7 +951,7 @@ def calc_close_grid_frontwards_short(
close_prices[0],
)
if auto_unstuck_close[0] != 0.0:
abs_psize_ = round_(abs_psize_ - auto_unstuck_close[0], qty_step)
abs_psize_ = round_(abs_psize_ - abs(auto_unstuck_close[0]), qty_step)
if abs_psize_ < calc_min_entry_qty(
auto_unstuck_close[1], inverse, qty_step, min_qty, min_cost
):
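A quick arithmetic check (not part of the diff) of why abs() is needed here, assuming auto-unstuck close quantities for longs are negative (sell orders); values are hypothetical and round_() is simplified:

def round_(x, step):  # simplified stand-in for njit_funcs.round_
    return round(round(x / step) * step, 10)

psize_ = 1.0                # remaining long position size
auto_unstuck_qty = -0.2     # auto-unstuck close qty (negative for long closes)
qty_step = 0.001

old = round_(psize_ - auto_unstuck_qty, qty_step)       # 1.2 -> remaining size wrongly grows
new = round_(psize_ - abs(auto_unstuck_qty), qty_step)  # 0.8 -> remaining size correctly shrinks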
2 changes: 1 addition & 1 deletion njit_funcs_neat_grid.py
@@ -1113,7 +1113,7 @@ def backtest_neat_grid(
print("warning: long close qty greater than long psize")
print("psize_long", psize_long)
print("pprice_long", pprice_long)
print("closes_long[0]", closes_long[0])
print("closes_long", closes_long)
close_qty_long = -psize_long
new_psize_long, pprice_long = 0.0, 0.0
psize_long = new_psize_long
116 changes: 111 additions & 5 deletions optimize.py
@@ -45,6 +45,88 @@
logging.config.dictConfig({"version": 1, "disable_existing_loggers": True})


def calc_metrics_mean(analyses):
# first analysis in analyses is full backtest
mins = [
"closest_bkr_long",
"closest_bkr_short",
"eqbal_ratio_mean_of_10_worst_long",
"eqbal_ratio_mean_of_10_worst_short",
"eqbal_ratio_min_long",
"eqbal_ratio_min_short",
]
firsts = [
"n_days",
"exchange",
"adg_long",
"adg_per_exposure_long",
"adg_weighted_long",
"adg_weighted_per_exposure_long",
"adg_short",
"adg_per_exposure_short",
"adg_weighted_short",
"adg_weighted_per_exposure_short",
"fee_sum_long",
"fee_sum_short",
"final_balance_long",
"final_balance_short",
"final_equity_long",
"final_equity_short",
"gain_long",
"gain_short",
"loss_sum_long",
"loss_sum_short",
"n_closes_long",
"n_closes_short",
"n_days",
"n_entries_long",
"n_entries_short",
"n_fills_long",
"n_fills_short",
"n_ientries_long",
"n_ientries_short",
"n_normal_closes_long",
"n_normal_closes_short",
"n_rentries_long",
"n_rentries_short",
"n_unstuck_closes_long",
"n_unstuck_closes_short",
"n_unstuck_entries_long",
"n_unstuck_entries_short",
"net_pnl_plus_fees_long",
"net_pnl_plus_fees_short",
"pnl_sum_long",
"pnl_sum_short",
"profit_sum_long",
"profit_sum_short",
"starting_balance",
"symbol",
"volume_quote_long",
"volume_quote_short",
]
maxs = [
"hrs_stuck_max_long",
"hrs_stuck_max_short",
]
analysis_combined = {}
for key in mins:
if key in analyses[0]:
analysis_combined[key] = min([a[key] for a in analyses])
for key in firsts:
if key in analyses[0]:
analysis_combined[key] = analyses[0][key]
for key in maxs:
if key in analyses[0]:
analysis_combined[key] = max([a[key] for a in analyses])
for key in analyses[0]:
if key not in analysis_combined:
try:
analysis_combined[key] = np.mean([a[key] for a in analyses])
except:
analysis_combined[key] = analyses[0][key]
return analysis_combined
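A small usage sketch (not part of the diff) of how per-slice analyses are combined; the import path assumes this module (optimize.py) and the keys and numbers are hypothetical:

from optimize import calc_metrics_mean

analyses = [  # first analysis is the full backtest, the rest are slices
    {"closest_bkr_long": 0.9, "adg_per_exposure_long": 0.002, "hrs_stuck_max_long": 40.0, "sharpe_ratio_long": 1.1},
    {"closest_bkr_long": 0.7, "adg_per_exposure_long": 0.003, "hrs_stuck_max_long": 90.0, "sharpe_ratio_long": 0.9},
]
combined = calc_metrics_mean(analyses)
# closest_bkr_long      -> 0.7    (min across slices)
# adg_per_exposure_long -> 0.002  (taken from the full backtest)
# hrs_stuck_max_long    -> 90.0   (max across slices)
# sharpe_ratio_long     -> 1.0    (mean of other numeric keys)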


def backtest_wrap(config_: dict, ticks_caches: dict):
"""
loads historical data from disk, runs backtest and returns relevant metrics
@@ -60,6 +142,7 @@ def backtest_wrap(config_: dict, ticks_caches: dict):
"market_type",
"config_no",
"adg_n_subdivisions",
"n_backtest_slices",
"slim_analysis",
]
},
@@ -71,11 +154,26 @@
ticks = np.load(config_["ticks_cache_fname"])
try:
assert "adg_n_subdivisions" in config
fills_long, fills_short, stats = backtest(config, ticks)
if config["slim_analysis"]:
analysis = analyze_fills_slim(fills_long, fills_short, stats, config)
else:
longs, shorts, sdf, analysis = analyze_fills(fills_long, fills_short, stats, config)
analyses = []
n_slices = max(1, config["n_backtest_slices"])
slices = [(0, len(ticks))]
if n_slices > 2:
slices += [
(
int(len(ticks) * (i / n_slices)),
min(len(ticks), int(len(ticks) * ((i + 2) / n_slices))),
)
for i in range(max(1, n_slices - 1))
]
for ia, ib in slices:
data = ticks[ia:ib]
fills_long, fills_short, stats = backtest(config, data)
if config["slim_analysis"]:
analysis = analyze_fills_slim(fills_long, fills_short, stats, config)
else:
longs, shorts, sdf, analysis = analyze_fills(fills_long, fills_short, stats, config)
analyses.append(analysis.copy())
analysis = calc_metrics_mean(analyses)
except Exception as e:
analysis = get_empty_analysis()
logging.error(f'error with {config["symbol"]} {e}')
@@ -382,6 +480,14 @@ async def run_opt(args, config):
logging.error(f"error loading config {args.starting_configs}: {e}")

config["starting_configs"] = cfgs
config["keys_to_include"] = [
"starting_balance",
"latency_simulation_ms",
"market_type",
"adg_n_subdivisions",
"n_backtest_slices",
"slim_analysis",
]

if config["algorithm"] == "particle_swarm_optimization":
from particle_swarm_optimization import ParticleSwarmOptimization
22 changes: 2 additions & 20 deletions particle_swarm_optimization.py
@@ -213,16 +213,7 @@ def start_new_particle_position(self, wi: int):
"long": deepcopy(template["long"]),
"short": deepcopy(template["short"]),
},
**{
k: self.config[k]
for k in [
"starting_balance",
"latency_simulation_ms",
"market_type",
"adg_n_subdivisions",
"slim_analysis",
]
},
**{k: self.config[k] for k in self.config["keys_to_include"]},
**{"symbol": self.symbols[0], "config_no": self.iter_counter},
}
for side in ["long", "short"]:
@@ -313,16 +304,7 @@ def start_new_initial_eval(self, wi: int, swarm_key: str):
"long": deepcopy(self.swarm[swarm_key]["long"]["config"]),
"short": deepcopy(self.swarm[swarm_key]["short"]["config"]),
},
**{
k: self.config[k]
for k in [
"starting_balance",
"latency_simulation_ms",
"market_type",
"adg_n_subdivisions",
"slim_analysis",
]
},
**{k: self.config[k] for k in self.config["keys_to_include"]},
**{
"symbol": self.symbols[0],
"initial_eval_key": swarm_key,
