Merge pull request #1120 from HenKlei/feature-nns-instationary
Neural networks for instationary problems
sdrave committed Nov 4, 2020
2 parents 9f08abc + 9402b0d commit b81e178
Showing 6 changed files with 309 additions and 19 deletions.
3 changes: 3 additions & 0 deletions AUTHORS.md
@@ -16,6 +16,9 @@
* rename estimate -> estimate_error and estimator -> error_estimator
* avoid nested Product and Lincomb Functionals and Functions

* Hendrik Kleikamp, hendrik.kleikamp@uni-muenster.de
* artificial neural networks for instationary problems

## pyMOR 2020.1

* Linus Balicki, linus.balicki@ovgu.de
7 changes: 6 additions & 1 deletion docs/source/bibliography.rst
@@ -93,7 +93,7 @@ Bibliography
.. [HU18] J. Hesthaven, S. Ubbiali,
Non-intrusive reduced order modeling of nonlinear problems using neural networks,
-          Journal of Computational Physics, 363, 55-78, 2018.
+          Journal of Computational Physics, 363, pp. 55-78, 2018.
.. [PK16] P. Kürschner,
Efficient Low-Rank Solution of Large-Scale Matrix Equations,
@@ -141,6 +141,11 @@ Bibliography
Second Order Systems and DAEs,
PhD thesis, Virginia Tech, 2012
.. [WHR19] Q. Wang, J. Hesthaven, D. Ray,
Non-intrusive reduced order modeling of unsteady flows using artificial
neural networks with application to a combustion problem,
Journal of Computational Physics, 384, pp. 289-307, 2019.
.. [XZ11] Y. Xu and T. Zeng, Optimal :math:`\mathcal{H}_2` Model
Reduction for Large Scale MIMO Systems via Tangential
Interpolation,
87 changes: 83 additions & 4 deletions src/pymor/models/neural_network.py
@@ -27,6 +27,9 @@ class NeuralNetworkModel(Model):
:class:`~pymor.models.neural_network.FullyConnectedNN` with input size that
matches the (total) number of parameters and output size equal to the
dimension of the reduced space.
parameters
|Parameters| of the reduced order model (the same as used in the full-order
model).
output_functional
|Operator| mapping a given solution to the model output. In many applications,
this will be a |Functional|, i.e. an |Operator| mapping to scalars.
@@ -51,14 +54,14 @@ class NeuralNetworkModel(Model):
Name of the model.
"""

-    def __init__(self, neural_network, output_functional=None, products=None,
-                 error_estimator=None, visualizer=None, name=None):
+    def __init__(self, neural_network, parameters={}, output_functional=None,
+                 products=None, error_estimator=None, visualizer=None, name=None):

-        super().__init__(products=products, error_estimator=error_estimator, visualizer=visualizer, name=name)
+        super().__init__(products=products, error_estimator=error_estimator,
+                         visualizer=visualizer, name=name)

        self.__auto_init(locals())
        self.solution_space = NumpyVectorSpace(neural_network.output_dimension)
-        self.linear = output_functional is None or output_functional.linear
        if output_functional is not None:
            self.output_space = output_functional.range

@@ -73,6 +76,82 @@ def _compute_solution(self, mu=None, **kwargs):

return U


class NeuralNetworkInstationaryModel(Model):
"""Class for models of instationary problems that use artificial neural networks.
This class implements a |Model| that uses a neural network for solving.
Parameters
----------
T
The final time T.
nt
The number of time steps.
neural_network
The neural network that approximates the mapping from parameter space
to solution space. Should be an instance of
        :class:`~pymor.models.neural_network.FullyConnectedNN` with input size that
        matches the (total) number of parameters plus one (for the time component)
        and output size equal to the dimension of the reduced space.
parameters
|Parameters| of the reduced order model (the same as used in the full-order
model).
output_functional
|Operator| mapping a given solution to the model output. In many applications,
this will be a |Functional|, i.e. an |Operator| mapping to scalars.
This is not required, however.
products
A dict of inner product |Operators| defined on the discrete space the
problem is posed on. For each product with key `'x'` a corresponding
attribute `x_product`, as well as a norm method `x_norm` is added to
the model.
error_estimator
An error estimator for the problem. This can be any object with
an `estimate_error(U, mu, m)` method. If `error_estimator` is
not `None`, an `estimate_error(U, mu)` method is added to the
model which will call `error_estimator.estimate_error(U, mu, self)`.
visualizer
A visualizer for the problem. This can be any object with
a `visualize(U, m, ...)` method. If `visualizer`
is not `None`, a `visualize(U, *args, **kwargs)` method is added
to the model which forwards its arguments to the
visualizer's `visualize` method.
name
Name of the model.
"""

def __init__(self, T, nt, neural_network, parameters={}, output_functional=None,
products=None, error_estimator=None, visualizer=None, name=None):

super().__init__(products=products, error_estimator=error_estimator,
visualizer=visualizer, name=name)

self.__auto_init(locals())
self.solution_space = NumpyVectorSpace(neural_network.output_dimension)
if output_functional is not None:
self.output_space = output_functional.range

def _compute_solution(self, mu=None, **kwargs):

U = self.solution_space.empty(reserve=self.nt)
dt = self.T / (self.nt - 1)
t = 0.

# iterate over time steps
for i in range(self.nt):
mu = mu.with_(t=t)
# convert the parameter `mu` into a form that is usable in PyTorch
converted_input = torch.from_numpy(mu.to_numpy()).double()
# obtain (reduced) coordinates by forward pass of the parameter values through the neural network
result_neural_network = self.neural_network(converted_input).data.numpy()
# convert plain numpy array to element of the actual solution space
U.append(self.solution_space.make_array(result_neural_network))
t += dt

return U
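
The loop above evaluates one trained network at `nt` equidistant time instances. As a minimal standalone sketch of the same idea (illustrative only, not part of the commit; `net` stands in for any trained torch module mapping parameter values plus a time instance to reduced coefficients):

    import numpy as np
    import torch

    def evaluate_over_time(net, mu_values, T, nt):
        """Evaluate `net` on (mu, t) inputs for nt equidistant times in [0, T]."""
        coefficients = []
        for t in np.linspace(0., T, nt):
            # append the current time instance to the parameter values
            inp = torch.from_numpy(np.hstack([mu_values, t])).double()
            coefficients.append(net(inp).data.numpy())
        return np.array(coefficients)  # shape: (nt, reduced dimension)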


class FullyConnectedNN(nn.Module, BasicObject):
"""Class for neural networks with fully connected layers.
141 changes: 127 additions & 14 deletions src/pymor/reductors/neural_network.py
@@ -8,6 +8,8 @@
if config.HAVE_TORCH:
from numbers import Number

import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim
@@ -16,7 +18,7 @@
from pymor.algorithms.pod import pod
from pymor.core.base import BasicObject
from pymor.core.exceptions import NeuralNetworkTrainingFailed
-    from pymor.models.neural_network import FullyConnectedNN, NeuralNetworkModel
+    from pymor.models.neural_network import FullyConnectedNN, NeuralNetworkModel, NeuralNetworkInstationaryModel


class NeuralNetworkReductor(BasicObject):
@@ -124,7 +126,7 @@ def reduce(self, hidden_layers='[(N+P)*3, (N+P)*3]', activation_function=torch.t
# input and output size of the neural network are prescribed by the dimension of the parameter space
# and the reduced basis size
assert isinstance(hidden_layers, list)
-        layers = [len(self.fom.parameters),] + hidden_layers + [len(self.reduced_basis),]
+        layers = self._compute_layers_sizes(hidden_layers)

# compute validation data
if not hasattr(self, 'validation_data'):
@@ -133,10 +135,8 @@ def reduce(self, hidden_layers='[(N+P)*3, (N+P)*3]', activation_function=torch.t
if self.validation_set:
self.validation_data = []
for mu in self.validation_set:
-                    mu_tensor = torch.DoubleTensor(mu.to_numpy())
-                    u = self.fom.solve(mu)
-                    u_tensor = torch.DoubleTensor(self.reduced_basis.inner(u)[:,0])
-                    self.validation_data.append((mu_tensor, u_tensor))
+                    sample = self._compute_sample(mu, self.fom.solve(mu), self.reduced_basis)
+                    self.validation_data.extend(sample)
else:
number_validation_snapshots = int(len(self.training_data)*self.validation_ratio)
self.validation_data = self.training_data[0:number_validation_snapshots]
@@ -180,11 +180,14 @@ def reduce(self, hidden_layers='[(N+P)*3, (N+P)*3]', activation_function=torch.t
else:
raise ValueError('Unknown value for mean squared error of neural network')

def _compute_layers_sizes(self, hidden_layers):
"""Compute the number of neurons in the layers of the neural network."""
return [len(self.fom.parameters),] + hidden_layers + [len(self.reduced_basis),]

def _build_rom(self):
"""Construct the reduced order model."""
with self.logger.block('Building ROM ...'):
-            rom = NeuralNetworkModel(self.neural_network, name=f'{self.fom.name}_reduced')
+            rom = NeuralNetworkModel(self.neural_network, self.fom.parameters, name=f'{self.fom.name}_reduced')

return rom

@@ -279,8 +282,6 @@ def closure():

def build_basis(self):
"""Compute a reduced basis using proper orthogonal decomposition."""
-        self.training_data = []
-
with self.logger.block('Building reduced basis ...'):

# compute snapshots for POD and training of neural networks
@@ -294,24 +295,136 @@
atol=self.atol / 2., l2_err=self.l2_err / 2.,
**(self.pod_params or {}))

-            # determine the coefficients of the full-order solutions in the reduced basis to obtain the
-            # training data; convert everything into tensors that are compatible with PyTorch
+            self.training_data = []
            for mu, u in zip(self.training_set, U):
-                mu_tensor = torch.DoubleTensor(mu.to_numpy())
-                u_tensor = torch.DoubleTensor(reduced_basis.inner(u)[:,0])
-                self.training_data.append((mu_tensor, u_tensor))
+                sample = self._compute_sample(mu, u, reduced_basis)
+                self.training_data.extend(sample)

# compute mean square loss
mean_square_loss = (sum(U.norm2()) - sum(svals**2)) / len(U)

return reduced_basis, mean_square_loss
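
The `mean_square_loss` above is the standard POD projection error identity: with orthonormal POD basis :math:`V`, orthogonal projection :math:`\Pi_V` and the singular values :math:`\sigma_j` returned by `pod` (all with respect to the inner product used by `pod`), the mean squared projection error of the :math:`n` snapshots is

.. math::
    \frac{1}{n}\sum_{i=1}^{n}\|u_i - \Pi_V u_i\|^2
    = \frac{1}{n}\Bigl(\sum_{i=1}^{n}\|u_i\|^2 - \sum_{j=1}^{N}\sigma_j^2\Bigr),

which is exactly `(sum(U.norm2()) - sum(svals**2)) / len(U)`.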

def _compute_sample(self, mu, u, reduced_basis):
"""Transform parameter and corresponding solution to tensors."""
# determine the coefficients of the full-order solutions in the reduced basis to obtain the
# training data; convert everything into tensors that are compatible with PyTorch
mu_tensor = torch.DoubleTensor(mu.to_numpy())
u_tensor = torch.DoubleTensor(reduced_basis.inner(u)[:,0])
return [(mu_tensor, u_tensor),]
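
Concretely (shapes illustrative): for a two-dimensional parameter and a reduced basis of size 10, `_compute_sample` returns a single pair of tensors of shapes `(2,)` (from `mu.to_numpy()`) and `(10,)` (the reduced coefficients `reduced_basis.inner(u)[:,0]`).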

def reconstruct(self, u):
"""Reconstruct high-dimensional vector from reduced vector `u`."""
assert hasattr(self, 'reduced_basis')
return self.reduced_basis.lincomb(u.to_numpy())
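
For orientation, a rough end-to-end sketch of the intended workflow (illustrative; assumes `fom` is a parametric pyMOR |Model| and `training_set` a list of |parameter values| for it):

    from pymor.reductors.neural_network import NeuralNetworkReductor

    reductor = NeuralNetworkReductor(fom, training_set, validation_ratio=0.1,
                                     basis_size=10, ann_mse='like_basis')
    rom = reductor.reduce()             # POD basis construction + network training
    u_rb = rom.solve(training_set[0])   # reduced coefficients for one parameter
    U = reductor.reconstruct(u_rb)      # lift back to the full-order space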


class NeuralNetworkInstationaryReductor(NeuralNetworkReductor):
"""Reduced Basis reductor for instationary problems relying on
artificial neural networks.
This is a reductor that constructs a reduced basis using proper
orthogonal decomposition and trains a neural network that approximates
the mapping from parameter and time space to coefficients of the
full-order solution in the reduced basis.
The approach is described in [WHR19]_.
Parameters
----------
fom
The full-order |Model| to reduce.
training_set
Set of |parameter values| to use for POD and training of the
neural network.
validation_set
Set of |parameter values| to use for validation in the training
of the neural network.
validation_ratio
Fraction of the training set to use for validation in the training
of the neural network (only used if no validation set is provided).
basis_size
Desired size of the reduced basis. If `None`, rtol, atol or l2_err must
be provided.
rtol
Relative tolerance the basis should guarantee on the training set.
atol
Absolute tolerance the basis should guarantee on the training set.
l2_err
L2-approximation error the basis should not exceed on the training
set.
pod_params
Dict of additional parameters for the POD-method.
ann_mse
If `'like_basis'`, the mean squared error of the neural network on
the training set should not exceed the error of projecting onto the basis.
If `None`, the neural network with smallest validation error is
used to build the ROM.
If a tolerance is prescribed, the mean squared error of the neural
network on the training set should not exceed this threshold.
Training is interrupted if a neural network that undercuts the
error tolerance is found.
"""

def __init__(self, fom, training_set, validation_set=None, validation_ratio=0.1,
basis_size=None, rtol=0., atol=0., l2_err=0., pod_params=None,
ann_mse='like_basis'):
assert 0 < validation_ratio < 1 or validation_set
self.__auto_init(locals())

def _compute_layers_sizes(self, hidden_layers):
"""Compute the number of neurons in the layers of the neural network
(make sure to increase the input dimension to account for the time)."""
return [len(self.fom.parameters) + 1,] + hidden_layers + [len(self.reduced_basis),]
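
A small worked example of the resulting layouts (values illustrative): with `P = 2` parameters, a reduced basis of size `N = 10` and the default hidden-layer specification `'[(N+P)*3, (N+P)*3]'`,

    P, N = 2, 10
    hidden_layers = [(N + P) * 3, (N + P) * 3]
    stationary_layers = [P] + hidden_layers + [N]        # [2, 36, 36, 10]
    instationary_layers = [P + 1] + hidden_layers + [N]  # [3, 36, 36, 10], one extra input for t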

def _build_rom(self):
"""Construct the reduced order model."""
with self.logger.block('Building ROM ...'):
rom = NeuralNetworkInstationaryModel(self.fom.T, self.nt, self.neural_network,
self.fom.parameters, name=f'{self.fom.name}_reduced')

return rom

def build_basis(self):
"""Compute a reduced basis using proper orthogonal decomposition."""
with self.logger.block('Building reduced basis ...'):

# compute snapshots for POD and training of neural networks
with self.logger.block('Computing training snapshots ...'):
U = self.fom.solution_space.empty()
for mu in self.training_set:
u = self.fom.solve(mu)
if hasattr(self, 'nt'):
assert self.nt == len(u)
else:
self.nt = len(u)
U.append(u)

# compute reduced basis via POD
reduced_basis, svals = pod(U, modes=self.basis_size, rtol=self.rtol / 2.,
atol=self.atol / 2., l2_err=self.l2_err / 2.,
**(self.pod_params or {}))

self.training_data = []
for i, mu in enumerate(self.training_set):
sample = self._compute_sample(mu, U[i*self.nt:(i+1)*self.nt], reduced_basis)
self.training_data.extend(sample)

# compute mean square loss
mean_square_loss = (sum(U.norm2()) - sum(svals**2)) / len(U)

return reduced_basis, mean_square_loss
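
Note that `U` stacks the solution trajectories back to back, so the slice `U[i*self.nt:(i+1)*self.nt]` in the loop above recovers exactly the `nt` time steps computed for the i-th training parameter.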

def _compute_sample(self, mu, u, reduced_basis):
"""Transform parameter and corresponding solution to tensors
(make sure to include the time instances in the inputs)."""
parameters_with_time = [mu.with_(t=t) for t in np.linspace(0, self.fom.T, self.nt)]

samples = [(torch.DoubleTensor(mu.to_numpy()), torch.DoubleTensor(reduced_basis.inner(u_t)[:,0]))
for mu, u_t in zip(parameters_with_time, u)]

return samples
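
In contrast to the stationary reductor, each trajectory thus yields `nt` training samples of the form `((mu, t_j), reduced coefficients at t_j)`; e.g., 20 training parameters with `nt = 50` produce 1000 input/output pairs.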


class EarlyStoppingScheduler(BasicObject):
"""Class for performing early stopping in training of neural networks.
