chore: remove hard-coded input dimension of model pytorch_tcts (#843)
Co-authored-by: Jiabao Qu <qujiabao@logiocean.com>
PalanQu and Jiabao Qu committed Jan 12, 2022
1 parent edd8bad commit 6f71f8a
Showing 1 changed file with 3 additions and 2 deletions.
qlib/contrib/model/pytorch_tcts.py (3 additions & 2 deletions)

@@ -56,6 +56,7 @@ def __init__(
         loss="mse",
         fore_optimizer="adam",
         weight_optimizer="adam",
+        input_dim=360,
         output_dim=5,
         fore_lr=5e-7,
         weight_lr=5e-7,
@@ -83,6 +84,7 @@ def __init__(
         self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
         self.use_gpu = torch.cuda.is_available()
         self.seed = seed
+        self.input_dim = input_dim
         self.output_dim = output_dim
         self.fore_lr = fore_lr
         self.weight_lr = weight_lr
@@ -139,7 +141,6 @@ def loss_fn(self, pred, label, weight):
             raise NotImplementedError("mode {} is not supported!".format(self.mode))

     def train_epoch(self, x_train, y_train, x_valid, y_valid):
-
         x_train_values = x_train.values
         y_train_values = np.squeeze(y_train.values)

@@ -297,7 +298,7 @@ def training(
             dropout=self.dropout,
         )
         self.weight_model = MLPModel(
-            d_feat=360 + 3 * self.output_dim + 1,
+            d_feat=self.input_dim + 3 * self.output_dim + 1,
             hidden_size=self.hidden_size,
             num_layers=self.num_layers,
             dropout=self.dropout,
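For context, a minimal usage sketch of the now-configurable dimension, not taken from the repository: it assumes the model class exported by pytorch_tcts.py is named TCTS, that the remaining constructor arguments keep their defaults, and uses a purely illustrative 158-feature value (e.g. an Alpha158-style dataset instead of Alpha360's 360 features).

    # Hypothetical sketch: class name TCTS and input_dim value are assumptions,
    # keyword names follow the constructor signature shown in the diff above.
    from qlib.contrib.model.pytorch_tcts import TCTS

    model = TCTS(
        input_dim=158,              # was hard-coded to 360 before this commit
        output_dim=5,
        fore_optimizer="adam",
        weight_optimizer="adam",
        fore_lr=5e-7,
        weight_lr=5e-7,
    )
    # The weight model's feature size is now derived as
    # input_dim + 3 * output_dim + 1 rather than 360 + 3 * output_dim + 1.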
