```
D:\anaconda3\envs\pytorch\python.exe C:/Users/Lenovo/PycharmProjects/ladder/ladder/ladder.py --batch 100 --epochs 20 --noise_std 0.2 --data_dir ..\utils\data
BATCH SIZE: 100
EPOCHS: 20
RANDOM SEED: 42
NOISE STD: 0.2
LR DECAY EPOCH: 15
CUDA: False
Loading Data
========NETWORK=======
Ladder(
  (se): StackedEncoders(
    (encoders): Sequential(
      (encoder_0): Encoder(
        (linear): Linear(in_features=784, out_features=1000, bias=False)
        (bn_normalize_clean): BatchNorm1d(1000, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (bn_normalize): BatchNorm1d(1000, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (activation): ReLU()
      )
      (encoder_1): Encoder(
        (linear): Linear(in_features=1000, out_features=500, bias=False)
        (bn_normalize_clean): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (bn_normalize): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (activation): ReLU()
      )
      (encoder_2): Encoder(
        (linear): Linear(in_features=500, out_features=250, bias=False)
        (bn_normalize_clean): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (bn_normalize): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (activation): ReLU()
      )
      (encoder_3): Encoder(
        (linear): Linear(in_features=250, out_features=250, bias=False)
        (bn_normalize_clean): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (bn_normalize): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (activation): ReLU()
      )
      (encoder_4): Encoder(
        (linear): Linear(in_features=250, out_features=250, bias=False)
        (bn_normalize_clean): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (bn_normalize): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (activation): ReLU()
      )
      (encoder_5): Encoder(
        (linear): Linear(in_features=250, out_features=10, bias=False)
        (bn_normalize_clean): BatchNorm1d(10, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (bn_normalize): BatchNorm1d(10, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
        (activation): Softmax(dim=None)
      )
    )
  )
  (de): StackedDecoders(
    (bn_u_top): BatchNorm1d(10, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
    (decoders): Sequential(
      (decoder_0): Decoder(
        (V): Linear(in_features=10, out_features=250, bias=False)
        (bn_normalize): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
      )
      (decoder_1): Decoder(
        (V): Linear(in_features=250, out_features=250, bias=False)
        (bn_normalize): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
      )
      (decoder_2): Decoder(
        (V): Linear(in_features=250, out_features=250, bias=False)
        (bn_normalize): BatchNorm1d(250, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
      )
      (decoder_3): Decoder(
        (V): Linear(in_features=250, out_features=500, bias=False)
        (bn_normalize): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
      )
      (decoder_4): Decoder(
        (V): Linear(in_features=500, out_features=1000, bias=False)
        (bn_normalize): BatchNorm1d(1000, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
      )
      (decoder_5): Decoder(
        (V): Linear(in_features=1000, out_features=784, bias=False)
        (bn_normalize): BatchNorm1d(784, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
      )
    )
    (bottom_decoder): Decoder()
  )
  (bn_image): BatchNorm1d(784, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True)
)
==UNSUPERVISED-COSTS==
[0.1, 0.1, 0.1, 0.1, 0.1, 10.0, 1000.0]
=====================
TRAINING
C:\Users\Lenovo\PycharmProjects\ladder\ladder\encoder.py:92: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
h = self.activation(z)
C:\Users\Lenovo\PycharmProjects\ladder\ladder\encoder.py:74: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
h = self.activation(z_gb)
Traceback (most recent call last):
  File "C:/Users/Lenovo/PycharmProjects/ladder/ladder/ladder.py", line 282, in <module>
    main()
  File "C:/Users/Lenovo/PycharmProjects/ladder/ladder/ladder.py", line 248, in main
    bn_hat_z_layers_unlabelled = ladder.decoder_bn_hat_z_layers(hat_z_layers_unlabelled, z_pre_layers_unlabelled)
  File "C:/Users/Lenovo/PycharmProjects/ladder/ladder/ladder.py", line 50, in decoder_bn_hat_z_layers
    return self.de.bn_hat_z_layers(hat_z_layers, z_pre_layers)
  File "C:\Users\Lenovo\PycharmProjects\ladder\ladder\decoder.py", line 150, in bn_hat_z_layers
    hat_z_normalized = torch.div(hat_z - ones.mm(mean), ones.mm(torch.sqrt(var + 1e-10)))
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
Process finished with exit code 1
```
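Two separate problems show up in this log. The first, the repeated `UserWarning`, is cosmetic: the network repr above shows the top encoder's activation as `Softmax(dim=None)`, meaning it was built as `nn.Softmax()` without an explicit `dim`. Passing `dim=1` makes the class dimension explicit for a `(batch, classes)` output and silences the warning. A minimal sketch of the change in `encoder.py` (the surrounding constructor code is assumed, not quoted from the repo):

```python
import torch
import torch.nn as nn

# Build the top encoder's activation with an explicit dim instead of
# nn.Softmax(): for a (batch_size, num_classes) tensor such as the
# (100, 10) output in this run, dim=1 normalizes across the classes.
activation = nn.Softmax(dim=1)

logits = torch.randn(100, 10)
probs = activation(logits)  # every row now sums to 1
```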
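The crash itself looks like a PyTorch version incompatibility rather than a logic bug. At `decoder.py` line 150, `mean` and `var` are per-feature statistics that get expanded to batch shape via `ones.mm(...)`, and `mm` requires 2-D operands. On current PyTorch, reductions such as `z_pre.mean(0)` return 1-D tensors (older releases kept the reduced dimension), which is the likely source of the `IndexError: Dimension out of range` above. Passing `keepdim=True` restores the 2-D shape, and broadcasting then makes the `ones.mm(...)` expansion unnecessary. A sketch under those assumptions, with an illustrative helper name rather than the repo's actual API:

```python
import torch

def bn_with_unlabelled_stats(hat_z, z_pre, eps=1e-10):
    """Normalize hat_z with batch statistics of z_pre (both (batch, features)).

    Hypothetical stand-in for the failing step in
    StackedDecoders.bn_hat_z_layers in decoder.py.
    """
    # keepdim=True keeps the statistics as (1, features) matrices, matching
    # the shape the original ones.mm(mean) expansion expected.
    mean = z_pre.mean(dim=0, keepdim=True)
    var = z_pre.var(dim=0, unbiased=False, keepdim=True)
    # Broadcasting (batch, features) against (1, features) replaces the
    # explicit ones.mm(...) multiplications seen in the traceback.
    return (hat_z - mean) / torch.sqrt(var + eps)
```

If you would rather keep the original `ones.mm(...)` code path untouched, unsqueezing the statistics back to 2-D at the call site (`mean.unsqueeze(0)`, `var.unsqueeze(0)`) should have the same effect.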