-
Notifications
You must be signed in to change notification settings - Fork 77
/
autograd_mlpg_perf.py
115 lines (97 loc) · 3.33 KB
/
autograd_mlpg_perf.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
from __future__ import division, print_function, absolute_import
from nnmnkwii import paramgen as G
from nnmnkwii import autograd as AF
from torch.autograd import Variable
import torch
from torch import nn
import numpy as np
import time
import sys
def _get_windows_set():
windows_set = [
# Static
[
(0, 0, np.array([1.0])),
],
# Static + delta
[
(0, 0, np.array([1.0])),
(1, 1, np.array([-0.5, 0.0, 0.5])),
],
# Static + delta + deltadelta
[
(0, 0, np.array([1.0])),
(1, 1, np.array([-0.5, 0.0, 0.5])),
(1, 1, np.array([1.0, -2.0, 1.0])),
],
]
return windows_set
# ANSI terminal escape sequences used to colorize the benchmark summary.
OKGREEN = '\033[92m'  # green: UnitVarianceMLPG came out faster
FAIL = '\033[91m'  # red: UnitVarianceMLPG came out slower
ENDC = '\033[0m'  # reset terminal color back to default
def benchmark_mlpg(static_dim=59, T=100, batch_size=10, use_cuda=True):
    """Benchmark ``AF.mlpg`` against ``AF.unit_variance_mlpg`` and print a
    colorized speed comparison (forward + backward, ``batch_size`` repeats).

    Args:
        static_dim (int): Dimension of the static feature vector.
        T (int): Number of frames per utterance.
        batch_size (int): Number of forward/backward passes to time.
        use_cuda (bool): Run the UnitVarianceMLPG case on GPU. If True but
            CUDA is unavailable, the benchmark is skipped entirely.
    """
    if use_cuda and not torch.cuda.is_available():
        return
    # Use the richest configuration: static + delta + delta-delta.
    windows = _get_windows_set()[-1]
    np.random.seed(1234)
    torch.manual_seed(1234)
    means = np.random.rand(T, static_dim * len(windows)).astype(np.float32)
    # Unit variances, so both code paths should produce the same trajectory.
    # NOTE(review): this is float64 while means are float32; AF.mlpg below
    # receives a double tensor for variances — confirm the op accepts mixed
    # precision as intended.
    variances = np.ones(static_dim * len(windows))
    reshaped_means = G.reshape_means(means, static_dim)
    # Pseudo target: the reference MLPG trajectory computed by the
    # non-autograd implementation.
    y = G.mlpg(means, variances, windows).astype(np.float32)
    # Pack into Variables (legacy pre-0.4 PyTorch autograd API, used
    # consistently throughout this file).
    means = Variable(torch.from_numpy(means), requires_grad=True)
    reshaped_means = Variable(
        torch.from_numpy(reshaped_means), requires_grad=True)
    y = Variable(torch.from_numpy(y), requires_grad=False)
    criterion = nn.MSELoss()
    # Case 1: MLPG (generic autograd op; always timed on CPU).
    since = time.time()
    for _ in range(batch_size):
        y_hat = AF.mlpg(means, torch.from_numpy(variances), windows)
        L = criterion(y_hat, y)
        # Sanity check: forward output must match the reference trajectory.
        assert np.allclose(y_hat.data.numpy(), y.data.numpy())
        L.backward()  # slow!
    elapsed_mlpg = time.time() - since
    # Case 2: UnitVarianceMLPG (matrix-multiply formulation).
    # Timing starts here, so the one-off GPU transfers of y and R below are
    # deliberately included in the measured time.
    since = time.time()
    if use_cuda:
        y = y.cuda()
    R = G.unit_variance_mlpg_matrix(windows, T)
    R = torch.from_numpy(R)
    # Assuming minibatches are zero-padded, we only need to create the MLPG
    # matrix per-minibatch, not per-utterance.
    if use_cuda:
        R = R.cuda()
    for _ in range(batch_size):
        if use_cuda:
            # Round-trip the means CPU->GPU each iteration so the per-batch
            # host-to-device transfer cost is part of the measurement.
            means = means.cpu()
            means = means.cuda()
        y_hat = AF.unit_variance_mlpg(R, means)
        L = criterion(y_hat, y)
        assert np.allclose(y_hat.cpu().data.numpy(), y.cpu().data.numpy(),
                           atol=1e-5)
        L.backward()
    elapsed_unit_variance_mlpg = time.time() - since
    # ratio > 1 means UnitVarianceMLPG was faster than plain MLPG.
    ratio = elapsed_mlpg / elapsed_unit_variance_mlpg
    print(
        "MLPG vs UnitVarianceMLPG (static_dim, T, batch_size, use_cuda) = ({}):".format(
            (static_dim, T, batch_size, use_cuda)))
    if ratio > 1:
        s = "faster"
        sys.stdout.write(OKGREEN)
    else:
        s = "slower"
        sys.stdout.write(FAIL)
    print("UnitVarianceMLPG, {:4f} times {}. Elapsed times {:4f} / {:4f}".format(
        ratio, s, elapsed_mlpg, elapsed_unit_variance_mlpg))
    print(ENDC)
if __name__ == "__main__":
for use_cuda in [False, True]:
for static_dim in [24, 59]:
for T in [500, 1000]:
for batch_size in [1, 5, 10]:
benchmark_mlpg(
static_dim=static_dim, T=T,
batch_size=batch_size, use_cuda=use_cuda)