
Commit

[refactor] layer layer with docs
martyn committed Aug 3, 2020
1 parent ed1038b commit 467d26e
Showing 5 changed files with 111 additions and 24 deletions.
1 change: 1 addition & 0 deletions docs/SUMMARY.md
@@ -45,6 +45,7 @@
* [cat](components/layers/cat.md)
* [channel_attention](components/layers/channel_attention.md)
* [ez_norm](components/layers/ez_norm.md)
* [layer](components/layers/layer.md)
* [mul](components/layers/mul.md)
* [multi_head_attention](components/layers/multi_head_attention.md)
* [operation](components/layers/operation.md)
41 changes: 41 additions & 0 deletions docs/components/layers/layer.md
@@ -0,0 +1,41 @@

---
description: 'layer layer for configurable component'
---

# layer layer

`layer` allows you to reference the output of any named layer defined elsewhere in the network.

## arguments

`layer_name` - The name of the layer to reference (assigned with `name=` on another layer)

## optional arguments

`upsample` - If true, upsample the referenced layer's output to the current size

## input size

Any 4-d tensor

## output size

If `upsample` is true, the current input size; otherwise, the size of the referenced layer.

## syntax

```json
"layer z"
```

## examples

```json

"identity name=encoding",
...
"add self (layer encoding upsample=true)"
```
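
Both forms can appear in one network definition. A slightly fuller, hypothetical fragment (the name `skip` and the omitted middle layers are placeholders, not part of this commit's examples):

```json
"identity name=skip",
...
"layer skip",
"add self (layer skip upsample=true)"
```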

29 changes: 5 additions & 24 deletions hypergan/configurable_component.py
@@ -57,6 +57,7 @@ def __init__(self, gan, config, input=None, input_shape=None, context_shapes = {
"cat": hg.layers.Cat,
"channel_attention": hg.layers.ChannelAttention,
"ez_norm": hg.layers.EzNorm,
"layer": hg.layers.Layer,
"mul": hg.layers.Mul,
"multi_head_attention2": hg.layers.MultiHeadAttention, #TODO rename
"pixel_shuffle": hg.layers.PixelShuffle,
@@ -69,7 +70,6 @@ def __init__(self, gan, config, input=None, input_shape=None, context_shapes = {
"dropout": self.layer_dropout,
"identity": self.layer_identity,
"flatten": self.layer_flatten,
"layer": self.layer_layer,
"pretrained": self.layer_pretrained,
"avg_pool": self.layer_avg_pool,#TODO handle dims
"pad": self.layer_pad,
@@ -180,36 +180,24 @@ def parse_layer(self, layer_defn):
def build_layer(self, op, args, options):
if self.layer_ops[op]:
try:
is_hg_module = issubclass(self.layer_ops[op], hg.Layer)
is_hg_layer = issubclass(self.layer_ops[op], hg.Layer)
except TypeError:
is_hg_module = False
is_hg_layer = False

if is_hg_module:
if is_hg_layer:
net = self.layer_ops[op](self, args, options)
self.current_size = net.output_size()
elif isinstance(self.layer_ops[op], nn.Module):
net = self.layer_ops[op]
else:
#before_count = self.count_number_trainable_params()
print("Size before: ", self.current_size.dims)
net = self.layer_ops[op](None, args, options)
print("Size after: ", self.current_size.dims)
if 'name' in options:
self.set_layer(options['name'], net)

if options.trainable == False:
self.untrainable_parameters = self.untrainable_parameters.union(set(net.parameters()))
return net

#after = self.variables()
#new = set(after) - set(before)
#for j in new:
# self.layer_options[j]=options
#after_count = self.count_number_trainable_params()
#if not self.ops._reuse:
# if net == None:
# print("[Error] Layer resulted in null return value: ", op, args, options)
# raise ValidationException("Configurable layer is null")
# print("layer: ", self.ops.shape(net), op, args, after_count-before_count, "params")
else:
print("ConfigurableComponent: Op not defined", op)

@@ -610,11 +598,6 @@ def layer_latent(self, net, args, options):
self.current_size = LayerShape(self.gan.latent.current_input_size)
return NoOp()

def layer_layer(self, net, args, options):
if args[0] in self.layer_output_sizes:
self.current_size = self.layer_output_sizes[args[0]]
return NoOp()

def layer_linformer(self, net, args, options):
model = Linformer(
input_size = self.current_size.size(),
@@ -737,8 +720,6 @@ def forward(self, input, context={}):
input = module(input, context['w'])
elif layer_name == "split":
input = torch.split(input, args[0], options.dim or -1)[args[1]]
elif layer_name == "layer":
input = context[args[0]]
elif layer_name == "latent":
input = self.gan.latent.z#sample()
elif layer_name == "modulated_conv2d":
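For orientation, `build_layer` above expects any `hg.Layer` subclass to be constructed with `(component, args, options)` and to expose `output_size()` and `forward(input, context)`; the new `Layer` class added in this commit follows exactly that contract. A minimal hypothetical sketch of a custom layer written against the same contract (the `Scale` class is illustrative only and not part of hypergan):

```python
import hypergan as hg

class Scale(hg.Layer):
    """Illustrative only: scales its input by a constant factor."""

    def __init__(self, component, args, options):
        super(Scale, self).__init__(component, args, options)
        # First positional argument from the config string, e.g. "scale 2"
        self.factor = float(args[0]) if args else 1.0
        # Shape is unchanged, so report the component's current size back.
        self.size = component.current_size

    def forward(self, input, context):
        return input * self.factor

    def output_size(self):
        # build_layer stores this as the component's current_size after construction.
        return self.size
```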
1 change: 1 addition & 0 deletions hypergan/layers/__init__.py
@@ -15,6 +15,7 @@
from .cat import Cat
from .mul import Mul
from .ez_norm import EzNorm
from .layer import Layer
from .residual import Residual
from .channel_attention import ChannelAttention
from .pixel_shuffle import PixelShuffle
63 changes: 63 additions & 0 deletions hypergan/layers/layer.py
@@ -0,0 +1,63 @@
import torch.nn as nn
import hypergan as hg
from hypergan.layer_shape import LayerShape

class Layer(hg.Layer):
"""
---
description: 'layer layer for configurable component'
---
# layer layer
`layer` allows you to reference the output of any named layer defined elsewhere in the network.
## arguments
`layer_name` - The name of the layer to reference (assigned with `name=` on another layer)
## optional arguments
`upsample` - If true, upsample the referenced layer's output to the current size
## input size
Any 4-d tensor
## output size
If `upsample` is true, the current input size; otherwise, the size of the referenced layer.
## syntax
```json
"layer z"
```
## examples
```json
"identity name=encoding",
...
"add self (layer encoding upsample=true)"
```
"""
def __init__(self, component, args, options):
super(Layer, self).__init__(component, args, options)

self.name = args[0]
self.size = component.layer_output_sizes[args[0]]
if options.upsample:
self.size = LayerShape(self.size.channels, *component.current_size.dims[1:])
self.upsample = nn.Upsample(self.size.dims[1:], mode="bilinear")

def forward(self, input, context):
if hasattr(self, 'upsample'):
return self.upsample(context[self.name])
return context[self.name]

def output_size(self):
return self.size
