Add direction and direction_neuron objectives #6

Merged
merged 8 commits on Jun 23, 2020
63 changes: 62 additions & 1 deletion lucent/optvis/objectives.py
@@ -132,6 +132,67 @@ def inner(model):
    return inner


@wrap_objective()
def direction(layer, direction, batch=None):
    """Visualize a direction

    InceptionV1 example:
    > direction = torch.rand(512, device=device)
    > obj = objectives.direction(layer='mixed4c', direction=direction)

    Args:
        layer: Name of layer in model (string)
        direction: Direction to visualize. torch.Tensor of shape (num_channels,)
        batch: Batch number (int)

    Returns:
        Objective

    """

    @handle_batch(batch)
    def inner(model):
        # Broadcast the direction over batch and spatial dimensions so it
        # matches the (N, C, H, W) activations, then maximize cosine
        # similarity along the channel dimension (negated for gradient descent).
        return -torch.nn.CosineSimilarity(dim=1)(direction.reshape(
            (1, -1, 1, 1)), model(layer)).mean()

    return inner


@wrap_objective()
def direction_neuron(layer,
                     direction,
                     x=None,
                     y=None,
                     batch=None):
    """Visualize a single (x, y) position along the given direction

    Similar to the neuron objective, defaults to the center neuron.

    InceptionV1 example:
    > direction = torch.rand(512, device=device)
    > obj = objectives.direction_neuron(layer='mixed4c', direction=direction)

    Args:
        layer: Name of layer in model (string)
        direction: Direction to visualize. torch.Tensor of shape (num_channels,)
        x: x position of the neuron (int), defaults to the center
        y: y position of the neuron (int), defaults to the center
        batch: Batch number (int)

    Returns:
        Objective

    """

    @handle_batch(batch)
    def inner(model):
        # Extract the activation vector at the (x, y) position (center by
        # default), then maximize cosine similarity with the direction.
        layer_t = model(layer)
        layer_t = _extract_act_pos(layer_t, x, y)
        return -torch.nn.CosineSimilarity(dim=1)(direction.reshape(
            (1, -1, 1, 1)), layer_t).mean()

    return inner

def _torch_blur(tensor, out_c=3):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    depth = tensor.shape[1]
@@ -241,7 +302,7 @@ def inner(model):
        batch, channels, _, _ = layer_t.shape
        flattened = layer_t.view(batch, channels, -1)
        grams = torch.matmul(flattened, torch.transpose(flattened, 1, 2))
        grams = F.normalize(grams, p=2, dim=(1, 2))
        return -sum([ sum([ (grams[i]*grams[j]).sum()
               for j in range(batch) if j != i])
               for i in range(batch)]) / batch
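
For reference, a minimal standalone sketch (not from this PR; the shapes and values are made up) of what the new direction objective computes: the direction vector is reshaped to (1, C, 1, 1) so it broadcasts against activations of shape (N, C, H, W), and cosine similarity is reduced over the channel dimension.

import torch

num_channels = 512
direction = torch.rand(num_channels)                 # stand-in direction vector
activations = torch.rand(4, num_channels, 14, 14)    # fake (N, C, H, W) activations

cos = torch.nn.CosineSimilarity(dim=1)
similarity = cos(direction.reshape((1, -1, 1, 1)), activations)  # -> (N, H, W)
loss = -similarity.mean()  # negated so gradient descent increases similarity
print(similarity.shape, loss.item())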
16 changes: 14 additions & 2 deletions tests/optvis/test_objectives.py
@@ -77,10 +77,10 @@ def test_linear_transform(inceptionv1_model):


def test_mul_div_raises():
    with pytest.raises(Exception) as excinfo:
        objective = objectives.channel("mixed4a", 0) / objectives.channel("mixed4a", 0)
    assert str(excinfo.value) == "Can only divide by int or float. Received type <class 'lucent.optvis.objectives.Objective'>"
    with pytest.raises(Exception) as excinfo:
        objective = objectives.channel("mixed4a", 0) * objectives.channel("mixed4a", 0)
    assert str(excinfo.value) == "Can only multiply by int or float. Received type <class 'lucent.optvis.objectives.Objective'>"

@@ -103,3 +103,15 @@ def test_alignment(inceptionv1_model):
def test_diversity(inceptionv1_model):
    objective = objectives.channel("mixed4a", 0) - 100 * objectives.diversity("mixed4a")
    assert_gradient_descent(objective, inceptionv1_model)


def test_direction(inceptionv1_model):
    direction = torch.rand(512) * 1000
    objective = objectives.direction(layer='mixed4c', direction=direction)
    assert_gradient_descent(objective, inceptionv1_model)


def test_direction_neuron(inceptionv1_model):
    direction = torch.rand(512) * 1000
    objective = objectives.direction_neuron(layer='mixed4c', direction=direction)
    assert_gradient_descent(objective, inceptionv1_model)
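
A rough end-to-end usage sketch, assuming the render_vis and modelzoo APIs shown in lucent's README (not part of this diff):

import torch
from lucent.modelzoo import inceptionv1
from lucent.optvis import render, objectives

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = inceptionv1(pretrained=True).to(device).eval()

# Visualize a random 512-dimensional direction in mixed4c's channel space.
direction = torch.rand(512, device=device)
obj = objectives.direction(layer='mixed4c', direction=direction)
images = render.render_vis(model, obj)

# Same direction, but optimized only at the center (x, y) position.
obj_neuron = objectives.direction_neuron(layer='mixed4c', direction=direction)
images_neuron = render.render_vis(model, obj_neuron)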