Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Deterministic results #45

Merged
merged 2 commits into from
Apr 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 23 additions & 6 deletions DeepINN/config.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,39 @@
import torch
import random
import numpy as np

class Config:
"""
Set up configuration such as the data-types, number generation seeds etc.
We use this Config object to apply during the training. So, one doesn't need to apply seeds in each jupyter notebook cell.
"""
def __init__(self, float_type=torch.float32, random_seed=42, device = 'cuda'):
self.float_type = float_type
def __init__(self, torch_type=torch.float32, torch_seed=0, random_seed=0, numpy_seed=0, device = 'cuda'):
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

self.float_type = torch_type
self.torch_seed = torch_seed
self.random_seed = random_seed
self.numpy_seed = numpy_seed
self.device = device
# Add more configuration parameters as needed

self.apply_seeds()
self.apply_float_type()
self.default_device()
# self.apply_seeds()
# self.apply_float_type()
# self.default_device()
def load_all_configs(self):
    """Apply every configured RNG seed (torch, python `random`, numpy) in one call."""
    for seed_setter in (self.torch_seeds, self.random_seeds, self.numpy_seeds):
        seed_setter()

def apply_seeds(self):
def torch_seeds(self):
    """Seed torch's global RNG so torch results are reproducible.

    Fix: previously this seeded with ``self.random_seed`` (the seed meant
    for the python ``random`` module), leaving the dedicated
    ``self.torch_seed`` attribute stored but never used.
    """
    torch.manual_seed(self.torch_seed)

def random_seeds(self):
    """Seed python's built-in `random` module for reproducibility."""
    seed_value = self.random_seed
    random.seed(seed_value)

def numpy_seeds(self):
    """Seed numpy's legacy global RNG for reproducibility."""
    seed_value = self.numpy_seed
    np.random.seed(seed_value)

def apply_float_type(self):
    """Make the configured dtype torch's global default floating type."""
    desired_dtype = self.float_type
    torch.set_default_dtype(desired_dtype)
Expand Down
7 changes: 4 additions & 3 deletions DeepINN/constraint/losses.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,10 @@ def sampler_object(self):
else:
return self.sampler(self.geom.boundary, n_points = self.no_points, filter_fn = self.filter_fn)

def sample_labels(self, sampled_points):
def sample_labels(self, sampled_points: list):
# evaluate the BC labels at each training point.
return self.function(sampled_points)
#return torch.tensor(list(map(self.function, sampled_points)))
return self.function(sampled_points)

class PDE():
"""
Expand All @@ -72,7 +73,7 @@ def __init__(self, geom, sampling_strategy, no_points, filter_fn = None) -> None
if filter_fn is not None:
raise NotImplementedError("Filter function isn't implemented for collocation points. Use filter_fn=None.")

# assign the correct sampling strategy
# assign the correct sampling strategy
if self.sampling_strategy in strategy_dict:
self.sampler = strategy_dict[self.sampling_strategy]
else:
Expand Down
17 changes: 10 additions & 7 deletions DeepINN/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,18 +52,21 @@ def initialise_training(self, iterations : int = None):
self.training_history = [] # Initialize an empty list for storing loss values
self.iterations = iterations
# Load all the seeds, data types, devices etc.
# self.config.apply_seeds()
self.config.torch_seeds()
self.config.random_seeds()
self.config.numpy_seeds()
# self.config.apply_float_type()
# self.config.default_device()

# In 1D problem we need to combine the BCs as there is only one point for each BC, which returns an undefined feature scaling because the ub and lb are same in the denominator, so we get infinity
# For problem with multiple points on each boundary, we don't need to combine them.
if self.boundary_point_sample[0].size()[0] == 1: # if row is 1 in the particular boundary tensor
self.boundary_point_sample = torch.cat(self.boundary_point_sample, dim=0)
self.boundary_point_labels = torch.cat(self.boundary_point_labels, dim=0)
else:
self.boundary_point_sample = torch.cat(self.boundary_point_sample, dim=0)
self.boundary_point_labels = torch.cat(self.boundary_point_labels, dim=0)
#if self.boundary_point_sample[0].size()[0] == 1: # if row is 1 in the particular boundary tensor
self.boundary_point_sample = torch.cat(self.boundary_point_sample, dim=0)
self.boundary_point_labels = torch.cat(self.boundary_point_labels, dim=0)
# TODO: Currently all BCs are included in a single tensor. Meaning we can visualise/ print the BC loss on individual boundaries similar to DeepXDE.
# else:
# self.boundary_point_sample = torch.cat(self.boundary_point_sample, dim=0)
# self.boundary_point_labels = torch.cat(self.boundary_point_labels, dim=0)

# Set requires_grad=True for self.collocation_point_sample
self.collocation_point_sample.requires_grad = True
Expand Down
3 changes: 2 additions & 1 deletion DeepINN/nn/FCNN.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,8 @@ def weight_init(self):
self.initialiser(self.linears[i].weight.data, gain=1.0)

# set biases to zero
torch.nn.init.zeros_(self.linears[i].bias.data)
# torch.nn.init.zeros_(self.linears[i].bias.data)
torch.nn.init.constant_(self.linears[i].bias.data, 0.01)

def forward(self, input):
"""
Expand Down
2 changes: 1 addition & 1 deletion Tutorials/5. FCNN/3. model.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# FCNN training"
"# 1D Laplace Equation "
]
},
{
Expand Down
428 changes: 428 additions & 0 deletions Tutorials/6. 2D heat conduction/1. Laplace_FCDNN.ipynb

Large diffs are not rendered by default.

397 changes: 0 additions & 397 deletions Tutorials/6. 2D heat conduction/1. model.ipynb

This file was deleted.

454 changes: 454 additions & 0 deletions Tutorials/6. 2D heat conduction/2. Laplace_FCDNN_simple_BC.ipynb

Large diffs are not rendered by default.

449 changes: 449 additions & 0 deletions Tutorials/6. 2D heat conduction/3. Laplace_discontinuous_FCDNN.ipynb

Large diffs are not rendered by default.

Large diffs are not rendered by default.

6 changes: 5 additions & 1 deletion docs/_toc.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,4 +31,8 @@ parts:
chapters:
- file: Tutorials/5. FCNN/1. basic.ipynb
- file: Tutorials/5. FCNN/2. test.ipynb
- file: Tutorials/5. FCNN/3. model.ipynb
- file: Tutorials/5. FCNN/3. model.ipynb
- caption: 2D heat conduction
chapters:
- file: Tutorials/6. 2D heat conduction/1. Laplace_FCDNN.ipynb
- file: Tutorials/6. 2D heat conduction/2. Laplace_FourierNN.ipynb
2 changes: 2 additions & 0 deletions todo.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ Last work : `DeepINN/constraint/gradients.py`
- [x] Constraints tutorials.
- [ ] Add template PDE in constraint directory.
- [x] There was some problem with FCNN tutorial. It was incorrect PDE.
- [x] Add 2D discontinuous heat conduction problem
- [x] Add Fourier neural network and Deep Galerkin method architecture.

## Misc
- [x] Migrate to JupyterBooks.
Expand Down
Loading