[neural-networks] restructured training of neural networks #1274

Merged
merged 7 commits on Apr 22, 2021
18 changes: 9 additions & 9 deletions src/pymor/models/neural_network.py
@@ -67,7 +67,7 @@ def __init__(self, neural_network, parameters={}, output_functional=None,
def _compute_solution(self, mu=None, **kwargs):

# convert the parameter `mu` into a form that is usable in PyTorch
- converted_input = torch.from_numpy(mu.to_numpy()).double()
+ converted_input = torch.DoubleTensor(mu.to_numpy())
# obtain (reduced) coordinates by forward pass of the parameter values
# through the neural network
U = self.neural_network(converted_input).data.numpy()
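
Aside: the two conversion idioms in this hunk are interchangeable for this purpose; both turn the parameter array into a float64 tensor. A minimal sketch (not pyMOR code; a plain NumPy array stands in for mu.to_numpy()) verifying the equivalence:

import numpy as np
import torch

values = np.array([0.5, 1.25, 3.0])                  # stand-in for mu.to_numpy()

via_from_numpy = torch.from_numpy(values).double()   # removed variant
via_constructor = torch.DoubleTensor(values)         # added variant

# both are float64 tensors with identical contents
assert via_from_numpy.dtype == via_constructor.dtype == torch.float64
assert torch.equal(via_from_numpy, via_constructor)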
@@ -141,7 +141,7 @@ def _compute_solution(self, mu=None, **kwargs):
for i in range(self.nt):
mu = mu.with_(t=t)
# convert the parameter `mu` into a form that is usable in PyTorch
- converted_input = torch.from_numpy(mu.to_numpy()).double()
+ converted_input = torch.DoubleTensor(mu.to_numpy())
# obtain (reduced) coordinates by forward pass of the parameter values
# through the neural network
result_neural_network = self.neural_network(converted_input).data.numpy()
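
The loop above evaluates the network once per time instance (the update of `t` itself lies outside the shown lines). A self-contained sketch of that evaluation pattern, with a hypothetical plain PyTorch network standing in for self.neural_network and a dict standing in for the pyMOR parameter mu:

import numpy as np
import torch
import torch.nn as nn

# hypothetical stand-ins, not pyMOR API
net = nn.Sequential(nn.Linear(2, 8), nn.Tanh(), nn.Linear(8, 4)).double()
nt, T = 10, 1.0
mu = {'diffusion': 0.5}

results = []
for i in range(nt):
    t = i * T / (nt - 1)                        # advance time (elided in the hunk)
    inputs = np.array([mu['diffusion'], t])     # mimics mu.with_(t=t).to_numpy()
    converted_input = torch.DoubleTensor(inputs)
    # forward pass of the parameter values through the network, as above
    results.append(net(converted_input).data.numpy())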
@@ -160,24 +160,24 @@ class FullyConnectedNN(nn.Module, BasicObject):

Parameters
----------
- layers_sizes
+ layer_sizes
List of sizes (i.e. number of neurons) for the layers of the neural network.
activation_function
Function to use as the activation function between the individual layers.
"""

- def __init__(self, layers_sizes, activation_function=torch.tanh):
+ def __init__(self, layer_sizes, activation_function=torch.tanh):
super().__init__()

- if layers_sizes is None or not len(layers_sizes) > 1 or not all(size >= 1 for size in layers_sizes):
+ if layer_sizes is None or not len(layer_sizes) > 1 or not all(size >= 1 for size in layer_sizes):
raise ValueError

- self.input_dimension = layers_sizes[0]
- self.output_dimension = layers_sizes[-1]
+ self.input_dimension = layer_sizes[0]
+ self.output_dimension = layer_sizes[-1]

self.layers = nn.ModuleList()
- self.layers.extend([nn.Linear(int(layers_sizes[i]), int(layers_sizes[i+1]))
-                     for i in range(len(layers_sizes) - 1)])
+ self.layers.extend([nn.Linear(int(layer_sizes[i]), int(layer_sizes[i+1]))
+                     for i in range(len(layer_sizes) - 1)])

self.activation_function = activation_function

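Usage note: with the rename applied, a network is constructed from a single layer_sizes list, and the constructor rejects lists that do not describe at least an input and an output layer. A hedged sketch (import path taken from the file header above; behavior beyond __init__ is not shown in this diff):

from pymor.models.neural_network import FullyConnectedNN

# 2 input neurons, two hidden layers of 30 neurons each, 10 output neurons
net = FullyConnectedNN(layer_sizes=[2, 30, 30, 10])
assert net.input_dimension == 2 and net.output_dimension == 10

try:
    FullyConnectedNN(layer_sizes=[3])   # fewer than two sizes -> rejected
except ValueError:
    pass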