uniform init the codebooks, thanks to @ramyamounir for pointing this out
lucidrains committed Jul 11, 2022
1 parent dea416f · commit abbc41a
Showing 2 changed files with 8 additions and 3 deletions.
setup.py (2 changes: 1 addition & 1 deletion)

@@ -3,7 +3,7 @@
 setup(
   name = 'vector_quantize_pytorch',
   packages = find_packages(),
-  version = '0.8.0',
+  version = '0.8.1',
   license='MIT',
   description = 'Vector Quantization - Pytorch',
   author = 'Phil Wang',
vector_quantize_pytorch/vector_quantize_pytorch.py (9 changes: 7 additions & 2 deletions)

@@ -22,6 +22,11 @@ def l2norm(t):
 def log(t, eps = 1e-20):
     return torch.log(t.clamp(min = eps))

+def uniform_init(*shape):
+    t = torch.empty(shape)
+    nn.init.kaiming_uniform_(t)
+    return t
+
 def gumbel_noise(t):
     noise = torch.zeros_like(t).uniform_(0, 1)
     return -log(-log(noise))
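
For context on the new helper: nn.init.kaiming_uniform_ with its defaults fills the tensor in-place from U(-b, b) with b = sqrt(6 / fan_in), where fan_in is the second dimension of a 2D tensor. Codebook entries therefore start bounded and scaled to the feature dimension, instead of unit-variance Gaussian as with torch.randn. A minimal standalone sketch (the 512 x 256 shape is illustrative, not from the commit):

import torch
import torch.nn as nn

def uniform_init(*shape):
    # the commit's helper: empty tensor filled in-place by Kaiming-uniform,
    # i.e. U(-b, b) with b = sqrt(6 / fan_in), fan_in = shape[1] for 2D
    t = torch.empty(shape)
    nn.init.kaiming_uniform_(t)
    return t

codebook = uniform_init(512, 256)   # (codebook_size, dim)
gaussian = torch.randn(512, 256)    # the previous initialization

print(codebook.abs().max())         # bounded by sqrt(6 / 256) ~= 0.153
print(gaussian.std())               # ~1.0, with unbounded tails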
@@ -162,7 +167,7 @@ def __init__(
     ):
         super().__init__()
         self.decay = decay
-        init_fn = torch.randn if not kmeans_init else torch.zeros
+        init_fn = uniform_init if not kmeans_init else torch.zeros
         embed = init_fn(codebook_size, dim)

         self.codebook_size = codebook_size
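
A note on the other branch: with kmeans_init the zeros are only a placeholder; this codebase overwrites the codebook with k-means cluster centers on the first real batch, gated by an initted buffer. A compressed sketch of that deferred-init pattern (uniform_init as in the sketch above; the class name and surrounding details are illustrative, not the library's exact code):

import torch
import torch.nn as nn

class LazyCodebook(nn.Module):
    def __init__(self, codebook_size, dim, kmeans_init = False):
        super().__init__()
        init_fn = uniform_init if not kmeans_init else torch.zeros
        self.register_buffer('embed', init_fn(codebook_size, dim))
        # stays False until k-means has run on actual data
        self.register_buffer('initted', torch.tensor([not kmeans_init]))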
@@ -268,7 +273,7 @@ def __init__(
         self.decay = decay

         if not kmeans_init:
-            embed = l2norm(torch.randn(codebook_size, dim))
+            embed = l2norm(uniform_init(codebook_size, dim))
         else:
             embed = torch.zeros(codebook_size, dim)
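
In the cosine-similarity codebook the initial vectors are immediately l2-normalized, so the switch to uniform_init changes only the distribution of initial directions; every row ends up unit-length either way. A quick check (treating l2norm as F.normalize over the last dimension, which is what the helper amounts to; exact body assumed, uniform_init from the sketch above):

import torch
import torch.nn.functional as F

def l2norm(t):
    # assumed equivalent of the library's l2norm helper
    return F.normalize(t, dim = -1)

embed = l2norm(uniform_init(512, 256))
print(embed.norm(dim = -1))   # all rows have unit length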

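Nothing changes at the call site in 0.8.1; the new initialization is internal to the codebooks. A usage sketch in the style of the project README (values illustrative; assuming VectorQuantize exposes kmeans_init and returns (quantized, indices, commit_loss), as in versions of this era):

import torch
from vector_quantize_pytorch import VectorQuantize

vq = VectorQuantize(
    dim = 256,
    codebook_size = 512,
    kmeans_init = False   # False -> codebook starts from uniform_init
)

x = torch.randn(1, 1024, 256)
quantized, indices, commit_loss = vq(x)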
