Merge pull request #227 from Arthur-Null/high-freq-execution
High freq execution
Showing 10 changed files with 524 additions and 15 deletions.
@@ -0,0 +1,16 @@
# Universal Trading for Order Execution with Oracle Policy Distillation

This is the experiment code for our AAAI 2021 paper "[Universal Trading for Order Execution with Oracle Policy Distillation](https://seqml.github.io/opd/opd_aaai21.pdf)", including the implementations of all the compared methods in the paper and a general reinforcement learning framework for order execution in quantitative finance.
## Abstract

As a fundamental problem in algorithmic trading, order execution aims to fulfill a specific trading order, either liquidation or acquirement, for a given instrument. Towards an effective execution strategy, recent years have witnessed a shift from the analytical view with model-based market assumptions to a model-free perspective, i.e., reinforcement learning, due to its nature of sequential decision optimization. However, the noisy yet imperfect market information that can be leveraged by the policy makes it quite challenging to build sample-efficient reinforcement learning methods for effective order execution. In this paper, we propose a novel universal trading policy optimization framework to bridge the gap between noisy yet imperfect market states and the optimal action sequences for order execution. In particular, the framework leverages a policy distillation method in which an oracle teacher with perfect information guides the learning of the common policy towards a practically optimal execution strategy. Extensive experiments have shown significant improvements of our method over various strong baselines, with reasonable trading actions.
### Citation

You are more than welcome to cite our paper:

```
@inproceedings{fang2021universal,
  title={Universal Trading for Order Execution with Oracle Policy Distillation},
  author={Fang, Yuchen and Ren, Kan and Liu, Weiqing and Zhou, Dong and Zhang, Weinan and Bian, Jiang and Yu, Yong and Liu, Tie-Yan},
  booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
  year={2021}
}
```
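For readers skimming the diff, here is a minimal sketch of the policy-distillation idea, not the repository's exact training loop: the student's PPO objective is augmented with a cross-entropy term that pulls its action distribution towards the action chosen by the oracle teacher. The names `distillation_loss`, `student_logits`, and the weight `alpha` are illustrative assumptions.

```
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_action, ppo_loss, alpha=0.5):
    # student_logits: (batch, n_actions) unnormalized scores from the common (student) policy
    # teacher_action: (batch,) discrete action chosen by the oracle teacher
    # ppo_loss: the usual clipped-surrogate PPO loss for the student
    ce = F.cross_entropy(student_logits, teacher_action.long())
    return ppo_loss + alpha * ce

# Toy usage with random data.
logits = torch.randn(4, 3)
teacher = torch.tensor([0, 2, 1, 1])
loss = distillation_loss(logits, teacher, ppo_loss=torch.tensor(0.1))
```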
@@ -0,0 +1,5 @@
from .ppo import *
from .qmodel import *
from .teacher import *
from .util import *
from .opd import *
@@ -0,0 +1,74 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class OPD_Extractor(nn.Module):
    """Shared feature extractor for the OPD student; also passes through the
    oracle teacher's action carried in the first column of the observation."""

    def __init__(self, device="cpu", **kargs):
        super().__init__()
        self.device = device
        hidden_size = kargs["hidden_size"]
        fc_size = kargs["fc_size"]
        self.cnn_shape = kargs["cnn_shape"]

        self.rnn = nn.GRU(64, hidden_size, batch_first=True)
        self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
        self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU())
        self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU())
        self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU())

        self.fc = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size), nn.ReLU(), nn.Linear(hidden_size, 32), nn.ReLU(),
        )

    def forward(self, inp):
        inp = to_torch(inp, dtype=torch.float32, device=self.device)
        # First column is the oracle teacher's action; the rest is the market state.
        teacher_action = inp[:, 0]
        inp = inp[:, 1:]
        seq_len = inp[:, -1].to(torch.long)
        batch_size = inp.shape[0]
        # Raw intraday features: prepend one zero window, then split into
        # (30-step, 6-channel) windows for the Conv1d branch.
        raw_in = inp[:, : 6 * 240]
        raw_in = torch.cat((torch.zeros_like(inp[:, : 6 * 30]), raw_in), dim=-1)
        raw_in = raw_in.reshape(-1, 30, 6).transpose(1, 2)
        # Two-dimensional per-step features for the second GRU branch.
        dnn_in = inp[:, 6 * 240 : -1].reshape(batch_size, -1, 2)
        cnn_out = self.cnn(raw_in).view(batch_size, 9, -1)
        rnn_in = self.raw_fc(cnn_out)
        rnn2_in = self.dnn(dnn_in)
        rnn2_out = self.rnn2(rnn2_in)[0]
        rnn_out = self.rnn(rnn_in)[0]
        # Pick the hidden state of the current decision step from both GRUs.
        rnn_out = rnn_out[torch.arange(rnn_out.size(0)), seq_len]
        rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
        fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
        feature = self.fc(fc_in)
        return feature, teacher_action / 2


class OPD_Actor(nn.Module):
    def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
        super().__init__()
        self.extractor = extractor
        self.layer_out = nn.Sequential(nn.Linear(32, out_shape), nn.Softmax(dim=-1))
        self.device = device

    def forward(self, obs, state=None, info={}):
        # Keep the teacher action around so the training loop can add a distillation term.
        feature, self.teacher_action = self.extractor(obs)
        out = self.layer_out(feature)
        return out, state


class OPD_Critic(nn.Module):
    def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
        super().__init__()
        self.extractor = extractor
        self.value_out = nn.Linear(32, 1)
        self.device = device

    def forward(self, obs, state=None, info={}):
        feature, self.teacher_action = self.extractor(obs)
        return self.value_out(feature).squeeze(dim=-1)
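A minimal smoke test for the modules above, not part of the original commit. The hyperparameters and the observation width are assumptions inferred from `forward()` (one teacher-action column, `6 * 240` raw market features, nine two-dimensional per-step features, and a trailing sequence-length index); the experiment configs define the real values.

```
import numpy as np

# Hypothetical hyperparameters; OPD_* classes are defined in the file above.
kwargs = dict(hidden_size=64, fc_size=32, cnn_shape=(30, 6))
extractor = OPD_Extractor(device="cpu", **kwargs)
actor = OPD_Actor(extractor, out_shape=3)
critic = OPD_Critic(extractor, out_shape=3)

# Assumed layout: [teacher_action, 6*240 raw features, 9*2 per-step features, seq_len].
obs = np.zeros((4, 1 + 6 * 240 + 18 + 1), dtype=np.float32)
probs, _ = actor(obs)     # (4, 3) action probabilities
values = critic(obs)      # (4,) state-value estimates
```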
@@ -0,0 +1,79 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class PPO_Extractor(nn.Module):
    """Feature extractor for the plain PPO student (no teacher action in the observation)."""

    def __init__(self, device="cpu", **kargs):
        super().__init__()
        self.device = device
        hidden_size = kargs["hidden_size"]
        fc_size = kargs["fc_size"]
        self.cnn_shape = kargs["cnn_shape"]

        self.rnn = nn.GRU(64, hidden_size, batch_first=True)
        self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
        self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU())
        self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU())
        self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU())

        self.fc = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size), nn.ReLU(), nn.Linear(hidden_size, 32), nn.ReLU(),
        )

    def forward(self, inp):
        inp = to_torch(inp, dtype=torch.float32, device=self.device)
        seq_len = inp[:, -1].to(torch.long)
        batch_size = inp.shape[0]
        # Raw intraday features: prepend one zero window, then split into
        # (30-step, 6-channel) windows for the Conv1d branch.
        raw_in = inp[:, : 6 * 240]
        raw_in = torch.cat((torch.zeros_like(inp[:, : 6 * 30]), raw_in), dim=-1)
        raw_in = raw_in.reshape(-1, 30, 6).transpose(1, 2)
        # Two-dimensional per-step features for the second GRU branch.
        dnn_in = inp[:, -19:-1].reshape(batch_size, -1, 2)
        cnn_out = self.cnn(raw_in).view(batch_size, 9, -1)
        assert not torch.isnan(cnn_out).any()
        rnn_in = self.raw_fc(cnn_out)
        assert not torch.isnan(rnn_in).any()
        rnn2_in = self.dnn(dnn_in)
        assert not torch.isnan(rnn2_in).any()
        rnn2_out = self.rnn2(rnn2_in)[0]
        assert not torch.isnan(rnn2_out).any()
        rnn_out = self.rnn(rnn_in)[0]
        assert not torch.isnan(rnn_out).any()
        # Pick the hidden state of the current decision step from both GRUs.
        rnn_out = rnn_out[torch.arange(rnn_out.size(0)), seq_len]
        rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
        fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
        self.feature = self.fc(fc_in)
        return self.feature


class PPO_Actor(nn.Module):
    def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
        super().__init__()
        self.extractor = extractor
        self.layer_out = nn.Sequential(nn.Linear(32, out_shape), nn.Softmax(dim=-1))
        self.device = device

    def forward(self, obs, state=None, info={}):
        self.feature = self.extractor(obs)
        assert not (torch.isnan(self.feature).any() | torch.isinf(self.feature).any()), f"{self.feature}"
        out = self.layer_out(self.feature)
        return out, state


class PPO_Critic(nn.Module):
    def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
        super().__init__()
        self.extractor = extractor
        self.value_out = nn.Linear(32, 1)
        self.device = device

    def forward(self, obs, state=None, info={}):
        self.feature = self.extractor(obs)
        return self.value_out(self.feature).squeeze(dim=-1)
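As a hedged sketch of how actor, critic, and their shared extractor are typically wired into tianshou (the training scripts in this commit may construct the policy differently, the hyperparameters below are placeholders, and the `PPOPolicy` signature varies across tianshou versions):

```
import torch
from tianshou.policy import PPOPolicy

kwargs = dict(hidden_size=64, fc_size=32, cnn_shape=(30, 6))  # assumed values
extractor = PPO_Extractor(device="cpu", **kwargs)             # shared backbone
actor = PPO_Actor(extractor, out_shape=3)
critic = PPO_Critic(extractor, out_shape=3)

# One optimizer over the union of parameters; the extractor is shared between
# actor and critic, so deduplicate before handing the set to Adam.
optim = torch.optim.Adam(set(actor.parameters()).union(critic.parameters()), lr=1e-4)
policy = PPOPolicy(actor, critic, optim, dist_fn=torch.distributions.Categorical)
```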
@@ -0,0 +1,52 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class RNNQModel(nn.Module):
    """Recurrent Q-network baseline: the same two-branch backbone as the PPO
    extractor, but the head outputs one Q-value per candidate action."""

    def __init__(self, device="cpu", out_shape=10, **kargs):
        super().__init__()
        self.device = device
        hidden_size = kargs["hidden_size"]
        fc_size = kargs["fc_size"]
        self.cnn_shape = kargs["cnn_shape"]

        self.rnn = nn.GRU(64, hidden_size, batch_first=True)
        self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
        self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU())
        self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU())
        self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU())

        self.fc = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 32),
            nn.ReLU(),
            nn.Linear(32, out_shape),
        )

    def forward(self, obs, state=None, info={}):
        inp = to_torch(obs, dtype=torch.float32, device=self.device)
        # Drop the leading 182 entries of the observation, which this model does not use.
        inp = inp[:, 182:]
        seq_len = inp[:, -1].to(torch.long)
        batch_size = inp.shape[0]
        raw_in = inp[:, : 6 * 240]
        raw_in = torch.cat((torch.zeros_like(inp[:, : 6 * 30]), raw_in), dim=-1)
        raw_in = raw_in.reshape(-1, 30, 6).transpose(1, 2)
        dnn_in = inp[:, 6 * 240 : -1].reshape(batch_size, -1, 2)
        cnn_out = self.cnn(raw_in).view(batch_size, 9, -1)
        rnn_in = self.raw_fc(cnn_out)
        rnn2_in = self.dnn(dnn_in)
        rnn2_out = self.rnn2(rnn2_in)[0]
        rnn_out = self.rnn(rnn_in)[0]
        rnn_out = rnn_out[torch.arange(rnn_out.size(0)), seq_len]
        rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
        fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
        out = self.fc(fc_in)
        return out, state
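A dummy forward pass, assuming the observation layout implied by `forward()` (182 leading entries that are sliced away, `6 * 240` raw features, nine two-dimensional per-step features, and a trailing sequence-length index); the hyperparameters are placeholders:

```
import numpy as np

model = RNNQModel(device="cpu", out_shape=10,
                  hidden_size=64, fc_size=32, cnn_shape=(30, 6))
obs = np.zeros((4, 182 + 6 * 240 + 18 + 1), dtype=np.float32)
q_values, state = model(obs)   # q_values: (4, 10), one Q-value per candidate action
```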
@@ -0,0 +1,70 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class Teacher_Extractor(nn.Module):
    """Feature extractor for the oracle teacher, which sees the whole day of raw
    market features rather than only the history up to the current step."""

    def __init__(self, device="cpu", feature_size=180, **kargs):
        super().__init__()
        self.device = device
        hidden_size = kargs["hidden_size"]
        fc_size = kargs["fc_size"]
        self.cnn_shape = kargs["cnn_shape"]

        self.rnn = nn.GRU(64, hidden_size, batch_first=True)
        self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
        self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU())
        self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU())
        self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU())

        self.fc = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size), nn.ReLU(), nn.Linear(hidden_size, 32), nn.ReLU(),
        )

    def forward(self, inp):
        inp = to_torch(inp, dtype=torch.float32, device=self.device)
        inp = inp[:, 182:]
        seq_len = inp[:, -1].to(torch.long)
        batch_size = inp.shape[0]
        # No zero padding here: the teacher consumes all eight raw-feature windows of the day.
        raw_in = inp[:, : 6 * 240].reshape(-1, 30, 6).transpose(1, 2)
        dnn_in = inp[:, 6 * 240 : -1].reshape(batch_size, -1, 2)
        cnn_out = self.cnn(raw_in).view(batch_size, 8, -1)
        rnn_in = self.raw_fc(cnn_out)
        rnn2_in = self.dnn(dnn_in)
        rnn2_out = self.rnn2(rnn2_in)[0]
        # The last GRU state summarizes the full day; the per-step branch is
        # still indexed at the current decision step.
        rnn_out = self.rnn(rnn_in)[0][:, -1, :]
        rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
        fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
        self.feature = self.fc(fc_in)
        return self.feature


class Teacher_Actor(nn.Module):
    def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
        super().__init__()
        self.extractor = extractor
        self.layer_out = nn.Sequential(nn.Linear(32, out_shape), nn.Softmax(dim=-1))
        self.device = device

    def forward(self, obs, state=None, info={}):
        self.feature = self.extractor(obs)
        out = self.layer_out(self.feature)
        return out, state


class Teacher_Critic(nn.Module):
    def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
        super().__init__()
        self.extractor = extractor
        self.value_out = nn.Linear(32, 1)
        self.device = device

    def forward(self, obs, state=None, info={}):
        self.feature = self.extractor(obs)
        return self.value_out(self.feature).squeeze(-1)
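Design note: unlike the student extractors earlier in this diff, Teacher_Extractor feeds the entire day's raw feature windows into its GRU and takes the last hidden state, without the zero-padded, per-step indexing used by PPO_Extractor and OPD_Extractor. This is consistent with the README's framing of the teacher as an oracle with perfect information, whose actions the student policy is later distilled towards.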