Managing to actually use sparse matrices
Now 2x real-time!
jmvalin committed Nov 29, 2018
1 parent 5c366b7 commit 7df3f9c
Showing 3 changed files with 14 additions and 4 deletions.
src/dump_lpcnet.py (10 additions & 2 deletions)
@@ -67,11 +67,19 @@ def printSparseVector(f, A, name):
     A[:,N:2*N] = A[:,N:2*N] - np.diag(np.diag(A[:,N:2*N]))
     A[:,2*N:] = A[:,2*N:] - np.diag(np.diag(A[:,2*N:]))
     printVector(f, diag, name + '_diag')
+    idx = np.zeros((0,), dtype='int')
     for i in range(3*N//16):
+        pos = idx.shape[0]
+        idx = np.append(idx, -1)
+        nb_nonzero = 0
         for j in range(N):
-            W = np.concatenate([W, A[j, i*16:(i+1)*16]])
+            if np.sum(np.abs(A[j, i*16:(i+1)*16])) > 1e-10:
+                nb_nonzero = nb_nonzero + 1
+                idx = np.append(idx, j)
+                W = np.concatenate([W, A[j, i*16:(i+1)*16]])
+        idx[pos] = nb_nonzero
     printVector(f, W, name)
-    idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)
+    #idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)
     printVector(f, idx, name + '_idx', dtype='int')
     return;
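
The packed layout written here is: for every 16-wide column block, idx stores the number of non-zero rows followed by the indices of those rows, while W stores the matching 16-element row slices back to back (the per-gate diagonals are dumped separately as name + '_diag'). As a sanity check, a minimal sketch that rebuilds the dense matrix from that layout (the helper name decode_sparse is ours, not part of the repository):

import numpy as np

def decode_sparse(W, idx, N):
    # Rebuild the (N, 3*N) matrix, minus its per-gate diagonals, from the
    # layout emitted by printSparseVector: [count, row indices...] per
    # 16-wide column block in idx, and the matching row slices packed in W.
    A = np.zeros((N, 3*N))
    w_pos = 0
    i_pos = 0
    for i in range(3*N//16):
        nb_nonzero = int(idx[i_pos])
        i_pos += 1
        for _ in range(nb_nonzero):
            j = int(idx[i_pos])
            i_pos += 1
            A[j, i*16:(i+1)*16] = W[w_pos:w_pos+16]
            w_pos += 16
    return A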

src/lpcnet.py (3 additions & 1 deletion)
@@ -57,7 +57,7 @@ def on_batch_end(self, batch, logs=None):
             pass
         else:
             #print("constrain");
-            layer = self.model.get_layer('cu_dnngru_1')
+            layer = self.model.get_layer('gru_a')
             w = layer.get_weights()
             p = w[1]
             nb = p.shape[1]//p.shape[0]
@@ -72,13 +72,15 @@ def on_batch_end(self, batch, logs=None):
             for k in range(nb):
                 A = p[:, k*N:(k+1)*N]
                 A = A - np.diag(np.diag(A))
+                A = np.transpose(A, (1, 0))
                 L=np.reshape(A, (N, N//16, 16))
                 S=np.sum(L*L, axis=-1)
                 SS=np.sort(np.reshape(S, (-1,)))
                 thresh = SS[round(N*N//16*(1-density))]
                 mask = (S>=thresh).astype('float32');
                 mask = np.repeat(mask, 16, axis=1)
                 mask = np.minimum(1, mask + np.diag(np.ones((N,))))
+                mask = np.transpose(mask, (1, 0))
                 p[:, k*N:(k+1)*N] = p[:, k*N:(k+1)*N]*mask
                 #print(thresh, np.mean(mask))
             w[1] = p
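
This second hunk is the core of the sparsity constraint: per gate, the recurrent sub-matrix is transposed, its 1x16 blocks are ranked by energy, only the top `density` fraction of blocks is kept, and the diagonal is always preserved; the two transposes added in this commit presumably make the pruned blocks line up with the 16-wide blocks that dump_lpcnet.py packs, which is what lets the inference code treat the matrix as genuinely sparse. A standalone sketch of the same mask computation (the function name is ours, not the repository's):

import numpy as np

def block_sparsity_mask(A, density):
    # Keep only the highest-energy 1x16 blocks of the (N x N) recurrent
    # sub-matrix A so that roughly `density` of the off-diagonal weights
    # survive; the diagonal is always kept.
    N = A.shape[0]
    A = A - np.diag(np.diag(A))              # the diagonal is handled separately
    A = np.transpose(A, (1, 0))
    S = np.sum(np.reshape(A, (N, N//16, 16))**2, axis=-1)   # energy per 1x16 block
    thresh = np.sort(np.reshape(S, (-1,)))[round(N*N//16*(1-density))]
    mask = np.repeat((S >= thresh).astype('float32'), 16, axis=1)
    mask = np.minimum(1, mask + np.diag(np.ones((N,))))      # always keep the diagonal
    return np.transpose(mask, (1, 0))        # back to A's original layout
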
src/train_lpcnet.py (1 addition & 1 deletion)
@@ -139,7 +139,7 @@
 in_data = np.concatenate([in_data, pred], axis=-1)
 
 # dump models to disk as we go
-checkpoint = ModelCheckpoint('lpcnet9_384_10_G16_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('lpcnet9b_384_10_G16_{epoch:02d}.h5')
 
 #model.load_weights('wavenet4f2_30.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
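
For context, the checkpoint above is normally passed to fit together with the sparsification callback defined in lpcnet.py; a minimal sketch of that wiring (the callback class name Sparsify, the out_data name, and the batch size and epoch count are assumptions, not values confirmed from this commit):

# Hypothetical wiring; the actual callback name, target array and
# training settings in train_lpcnet.py may differ.
model.fit(in_data, out_data, batch_size=64, epochs=120,
          validation_split=0.0,
          callbacks=[checkpoint, lpcnet.Sparsify()])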
