[neural-networks] allow numpy arrays as training data
HenKlei committed Apr 13, 2021
1 parent c8a06af commit def68b4
Showing 2 changed files with 15 additions and 15 deletions.
src/pymor/models/neural_network.py (4 changes: 2 additions & 2 deletions)
@@ -67,7 +67,7 @@ def __init__(self, neural_network, parameters={}, output_functional=None,
     def _compute_solution(self, mu=None, **kwargs):
 
         # convert the parameter `mu` into a form that is usable in PyTorch
-        converted_input = torch.from_numpy(mu.to_numpy()).double()
+        converted_input = torch.DoubleTensor(mu.to_numpy())
         # obtain (reduced) coordinates by forward pass of the parameter values
         # through the neural network
         U = self.neural_network(converted_input).data.numpy()
@@ -141,7 +141,7 @@ def _compute_solution(self, mu=None, **kwargs):
         for i in range(self.nt):
             mu = mu.with_(t=t)
             # convert the parameter `mu` into a form that is usable in PyTorch
-            converted_input = torch.from_numpy(mu.to_numpy()).double()
+            converted_input = torch.DoubleTensor(mu.to_numpy())
             # obtain (reduced) coordinates by forward pass of the parameter values
             # through the neural network
             result_neural_network = self.neural_network(converted_input).data.numpy()
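The model-side change swaps one tensor-conversion idiom for another. Below is a minimal sketch (not part of the commit, using a made-up parameter vector) showing that, assuming `mu.to_numpy()` returns a float64 array, both idioms yield the same double-precision tensor:

import numpy as np
import torch

mu_numpy = np.array([0.3, 1.7, 2.5])  # stand-in for mu.to_numpy()

old_style = torch.from_numpy(mu_numpy).double()  # wraps the array, then casts to float64
new_style = torch.DoubleTensor(mu_numpy)         # copies the array into a new double tensor

assert old_style.dtype == new_style.dtype == torch.float64
assert torch.equal(old_style, new_style)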
src/pymor/reductors/neural_network.py (26 changes: 13 additions & 13 deletions)
@@ -230,12 +230,10 @@ def build_basis(self):
         return reduced_basis, mean_square_loss
 
     def _compute_sample(self, mu, u, reduced_basis):
-        """Transform parameter and corresponding solution to tensors."""
+        """Transform parameter and corresponding solution to |NumPy arrays|."""
         # determine the coefficients of the full-order solutions in the reduced basis to obtain
-        # the training data; convert everything into tensors that are compatible with PyTorch
-        mu_tensor = torch.DoubleTensor(mu.to_numpy())
-        u_tensor = torch.DoubleTensor(reduced_basis.inner(u)[:, 0])
-        return [(mu_tensor, u_tensor)]
+        # the training data
+        return [(mu.to_numpy(), reduced_basis.inner(u)[:, 0])]
 
     def reconstruct(self, u):
         """Reconstruct high-dimensional vector from reduced vector `u`."""
@@ -338,12 +336,12 @@ def build_basis(self):
         return reduced_basis, mean_square_loss
 
     def _compute_sample(self, mu, u, reduced_basis):
-        """Transform parameter and corresponding solution to tensors
+        """Transform parameter and corresponding solution to |NumPy arrays|
         (make sure to include the time instances in the inputs).
         """
         parameters_with_time = [mu.with_(t=t) for t in np.linspace(0, self.fom.T, self.nt)]
 
-        samples = [(torch.DoubleTensor(mu.to_numpy()), torch.DoubleTensor(reduced_basis.inner(u_t)[:, 0]))
+        samples = [(mu.to_numpy(), reduced_basis.inner(u_t)[:, 0])
                    for mu, u_t in zip(parameters_with_time, u)]
 
         return samples
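In the instationary case, one sample is produced per time instance, with the time value included in the network input. A hedged sketch in plain NumPy (hypothetical sizes; appending the time to the parameter vector merely stands in for `mu.with_(t=t).to_numpy()`):

import numpy as np

T, nt, parameter_dim, reduced_dim = 1.0, 5, 2, 10       # assumed values for illustration
mu_array = np.random.rand(parameter_dim)                # stand-in for mu.to_numpy() without time
reduced_trajectory = np.random.rand(nt, reduced_dim)    # stand-in for the reduced coefficients of u

samples = [(np.append(mu_array, t), u_t)
           for t, u_t in zip(np.linspace(0, T, nt), reduced_trajectory)]
assert len(samples) == nt and samples[0][0].shape == (parameter_dim + 1,)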
@@ -437,13 +435,15 @@ def train_neural_network(training_data, validation_data, neural_network,
     ----------
     training_data
         Data to use during the training phase. Has to be a list of tuples,
-        where each tuple consists of two PyTorch-tensors (`torch.DoubleTensor`).
-        The first tensor contains the input data, the second tensor contains
+        where each tuple consists of two elements that are either
+        PyTorch-tensors (`torch.DoubleTensor`) or |NumPy arrays|.
+        The first element contains the input data, the second element contains
         the target values.
     validation_data
         Data to use during the validation phase. Has to be a list of tuples,
-        where each tuple consists of two PyTorch-tensors (`torch.DoubleTensor`).
-        The first tensor contains the input data, the second tensor contains
+        where each tuple consists of two elements that are either
+        PyTorch-tensors (`torch.DoubleTensor`) or |NumPy arrays|.
+        The first element contains the input data, the second element contains
         the target values.
     neural_network
         The neural network to train (can also be a pre-trained model).
@@ -481,8 +481,8 @@ def train_neural_network(training_data, validation_data, neural_network,
     for data in training_data, validation_data:
         assert isinstance(data, list)
         assert all(isinstance(datum, tuple) and len(datum) == 2 for datum in data)
-        assert all(isinstance(datum[0], torch.DoubleTensor) for datum in data)
-        assert all(isinstance(datum[1], torch.DoubleTensor) for datum in data)
+        assert all(isinstance(datum[0], torch.DoubleTensor) or isinstance(datum[0], np.ndarray) for datum in data)
+        assert all(isinstance(datum[1], torch.DoubleTensor) or isinstance(datum[1], np.ndarray) for datum in data)
 
     optimizer = optim.LBFGS if 'optimizer' not in training_parameters else training_parameters['optimizer']
     epochs = 1000 if 'epochs' not in training_parameters else training_parameters['epochs']
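With the relaxed assertions, `train_neural_network` accepts training and validation data given either as `torch.DoubleTensor` objects or as NumPy arrays. A hedged sketch of data that passes the updated check (random placeholder values, not from the commit):

import numpy as np
import torch

training_data = [(np.random.rand(2), np.random.rand(10)) for _ in range(20)]
validation_data = [(torch.DoubleTensor(np.random.rand(2)),
                    torch.DoubleTensor(np.random.rand(10))) for _ in range(5)]

for data in training_data, validation_data:
    assert all(isinstance(datum[0], torch.DoubleTensor) or isinstance(datum[0], np.ndarray) for datum in data)
    assert all(isinstance(datum[1], torch.DoubleTensor) or isinstance(datum[1], np.ndarray) for datum in data)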
