Issue#33 #34

Merged (2 commits, Oct 9, 2022)
deeprob/flows/layers/autoregressive.py: 34 changes (30 additions, 4 deletions)
@@ -54,6 +54,7 @@ def __init__(

# Preserve the input ordering
self.ordering = degrees[0]
self.inv_ordering = np.argsort(self.ordering)

# Initialize the conditioner neural network
layers = []
@@ -78,22 +79,47 @@ def apply_backward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return u, inv_log_det_jacobian

def apply_forward(self, u: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # If autograd is enabled, use the implementation that supports backpropagation
if torch.is_grad_enabled():
return self.__backprop_apply_forward(u)

# Initialize arbitrarily
x = torch.zeros_like(u)
log_det_jacobian = torch.zeros_like(u)

# This requires D iterations where D is the number of features
# Get the parameters and apply the affine transformation (forward mode)
-        for i in range(self.in_features):
+        for i in self.inv_ordering:
            z = self.network(x)
            t, s = torch.chunk(z, chunks=2, dim=1)
            s = self.scale_act(s)
-            idx = np.argwhere(self.ordering == i).item()
-            x[:, idx] = u[:, idx] * torch.exp(s[:, idx]) + t[:, idx]
-            log_det_jacobian[:, idx] = s[:, idx]
+            x[:, i] = u[:, i] * torch.exp(s[:, i]) + t[:, i]
+            log_det_jacobian[:, i] = s[:, i]
log_det_jacobian = torch.sum(log_det_jacobian, dim=1)
return x, log_det_jacobian

def __backprop_apply_forward(self, u: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
        Forward implementation of an autoregressive layer that enables backpropagation (but is slower).

:param u: The inputs.
:return: The transformed samples and the forward log-det-jacobian.
"""
# Initialize arbitrarily, and unstack along the features dimension to enable backpropagation
x = list(torch.unbind(torch.zeros_like(u), dim=1))
ldj = list(torch.unbind(torch.zeros_like(u), dim=1))

# This requires D iterations where D is the number of features
# Get the parameters and apply the affine transformation (forward mode)
for i in self.inv_ordering:
z = self.network(torch.stack(x, dim=1))
t, s = torch.chunk(z, chunks=2, dim=1)
s = self.scale_act(s)
x[i] = u[:, i] * torch.exp(s[:, i]) + t[:, i]
ldj[i] = s[:, i]
log_det_jacobian = torch.sum(torch.stack(ldj, dim=1), dim=1)
return torch.stack(x, dim=1), log_det_jacobian

def build_degrees_sequential(self, depth: int, units: int, reverse: bool) -> List[np.ndarray]:
"""
Build sequential degrees for the linear layers of the autoregressive network.
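For context on the loop change above: `np.argsort(self.ordering)` lists the input columns sorted by their assigned degree, which is exactly the sequence of `idx` values the old `np.argwhere` lookup produced one at a time. A minimal NumPy sketch (the ordering values are chosen here purely for illustration, not taken from the PR):

```python
import numpy as np

# Illustrative degree assignment: ordering[k] is the degree of input column k
ordering = np.array([2, 0, 1])
inv_ordering = np.argsort(ordering)  # columns visited in degree order: [1, 2, 0]

# The old loop recomputed the column index of each degree with np.argwhere;
# the new loop walks the precomputed inv_ordering and visits the same columns.
old_visit_order = [np.argwhere(ordering == d).item() for d in range(len(ordering))]
assert old_visit_order == list(inv_ordering)
```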
deeprob/flows/models/base.py: 23 changes (23 additions, 0 deletions)
@@ -11,6 +11,8 @@


class NormalizingFlow(ProbabilisticModel):
has_rsample = True

def __init__(
self,
in_features,
@@ -152,6 +154,27 @@ def sample(self, n_samples: int, y: Optional[torch.Tensor] = None) -> torch.Tensor:
x, _ = self.unpreprocess(x)
return x

def rsample(self, n_samples: int, y: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Sample some values from the modeled distribution by reparametrization.
Unlike :func:`NormalizingFlow.sample`, this method allows backpropagation.

:param n_samples: The number of samples.
        :param y: The samples' labels. It can be None.
:return: The samples.
"""
# Sample from the base distribution (should have rsample method)
if not self.in_base.has_rsample:
raise NotImplementedError("Base distribution must support parametrized sampling")
x = self.in_base.rsample([n_samples])

# Apply forward transformations
x, _ = self.apply_forward(x)

# Apply reversed preprocessing transformation
x, _ = self.unpreprocess(x)
return x

def apply_backward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Apply the backward transformation.
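As the docstring above notes, `rsample` differs from `sample` in that gradients can propagate from a function of the returned samples back to the flow parameters. A rough usage sketch; `flow`, `optimizer`, and the `sample_penalty` objective are illustrative placeholders, not part of this PR:

```python
import torch

def sample_penalty(flow, n_samples: int = 64) -> torch.Tensor:
    # Differentiable path: rsample keeps the graph from base noise to samples
    x = flow.rsample(n_samples)
    # Any differentiable function of the samples can serve as a training signal
    return x.pow(2).mean()

# Typical step (flow and optimizer are assumed to exist):
# optimizer.zero_grad()
# loss = sample_penalty(flow)
# loss.backward()   # gradients reach the flow's parameters through the samples
# optimizer.step()
```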
deeprob/torch/base.py: 2 changes (2 additions, 0 deletions)
@@ -11,6 +11,8 @@
class ProbabilisticModel(abc.ABC, nn.Module):
"""Abstract Probabilistic Model base class."""

has_rsample = False

def log_prob(self, x: torch.Tensor) -> torch.Tensor:
"""
Compute the log-likelihood of a batched sample.
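The `has_rsample` class attribute appears to mirror the convention of `torch.distributions.Distribution.has_rsample`, which the new `NormalizingFlow.rsample` already checks on its base distribution. A small sketch of the same capability check on the model side; `draw_samples` and `model` are hypothetical, not part of this PR:

```python
import torch

def draw_samples(model, n: int) -> torch.Tensor:
    # Use the differentiable path only when the model advertises support for it
    if getattr(model, "has_rsample", False):
        return model.rsample(n)
    with torch.no_grad():
        return model.sample(n)
```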
tests/test_flows.py: 11 changes (11 additions, 0 deletions)
@@ -25,6 +25,13 @@ def assert_flow_inverse(flow, data):
assert torch.allclose(orig_data, data, atol=5e-7)


def assert_sampling_autograd(flow):
with torch.enable_grad():
samples = flow.rsample(64)
assert samples.requires_grad
samples.mean().backward()


def test_squeeze_depth2d(data):
squeezed_data = squeeze_depth2d(data)
unsqueezed_data = unsqueeze_depth2d(squeezed_data)
@@ -66,6 +73,7 @@ def test_realnvp1d(flattened_data):
dequantize=True, logit=0.01
).eval()
assert_flow_inverse(realnvp, flattened_data)
assert_sampling_autograd(realnvp)
with pytest.raises(ValueError):
RealNVP1d(10, n_flows=0)
with pytest.raises(ValueError):
@@ -88,6 +96,7 @@ def test_realnvp2d(data):
dequantize=True, logit=0.01
).eval()
assert_flow_inverse(realnvp, data)
assert_sampling_autograd(realnvp)
with pytest.raises(ValueError):
RealNVP2d((3, 8, 8), n_flows=0)
with pytest.raises(ValueError):
@@ -112,9 +121,11 @@ def test_maf(flattened_data):
dequantize=True, logit=0.01
).eval()
assert_flow_inverse(maf, flattened_data)
assert_sampling_autograd(maf)
with pytest.raises(ValueError):
MAF(10, n_flows=0)
with pytest.raises(ValueError):
MAF(10, depth=0)
with pytest.raises(ValueError):
MAF(10, units=0)