Skip to content

Commit

Permalink
Merge pull request #54 from Corwinpro/pk/dev/chrono-std-time
Browse files Browse the repository at this point in the history
Replace `std::time` with `chrono`
  • Loading branch information
nestordemeure authored Dec 22, 2023
2 parents d02a286 + 5e48606 commit 6a3bb02
Show file tree
Hide file tree
Showing 5 changed files with 29 additions and 20 deletions.
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -25,3 +25,4 @@ rand = "0.8"
rand_distr = "0.4"
ndarray = { version = "0.15", optional = true }
serde = { version = "1.0", optional = true, features = ["derive"] }
chrono = "0.4.31"
5 changes: 3 additions & 2 deletions src/gaussian_process/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ use super::GaussianProcess;
use crate::conversion::Input;
use crate::parameters::kernel::Kernel;
use crate::parameters::prior::Prior;
use chrono::Duration;
use nalgebra::{DMatrix, DVector};

/// Builder to set the parameters of a gaussian process.
Expand Down Expand Up @@ -46,7 +47,7 @@ pub struct GaussianProcessBuilder<KernelType: Kernel, PriorType: Prior>
/// Fit parameters.
max_iter: usize,
convergence_fraction: f64,
max_time: std::time::Duration,
max_time: Duration,
/// Data used for training.
training_inputs: DMatrix<f64>,
training_outputs: DVector<f64>
Expand Down Expand Up @@ -74,7 +75,7 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcessBuilder<KernelType, Pr
let should_fit_prior = false;
let max_iter = 100;
let convergence_fraction = 0.05;
let max_time = std::time::Duration::from_secs(3600);
let max_time = Duration::seconds(3600);
// In most cases no Cholesky epsilon is needed, especially if the user
// has some noise set, which is also the default. If some epsilon value
// turns out to be needed, we point the user in the right direction via a
Expand Down
24 changes: 13 additions & 11 deletions src/gaussian_process/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
//!
//! ```rust
//! # use friedrich::gaussian_process::GaussianProcess;
//! # use chrono::Duration;
//! // Trains a gaussian process on a dataset of one dimension vectors.
//! let training_inputs = vec![vec![0.8], vec![1.2], vec![3.8], vec![4.2]];
//! let training_outputs = vec![3.0, 4.0, -2.0, -2.0];
Expand All @@ -25,7 +26,7 @@
//! let fit_kernel = true;
//! let max_iter = 100;
//! let convergence_fraction = 0.05;
//! let max_time = std::time::Duration::from_secs(3600);
//! let max_time = Duration::seconds(3600);
//! gp.add_samples(&additional_inputs, &additional_outputs);
//! gp.fit_parameters(fit_prior, fit_kernel, max_iter, convergence_fraction, max_time);
//!
Expand All @@ -42,6 +43,7 @@ use crate::algebra::{add_rows_cholesky_cov_matrix, make_cholesky_cov_matrix, mak
EVector};
use crate::conversion::Input;
use crate::parameters::{kernel, kernel::Kernel, prior, prior::Prior};
use chrono::Duration;
use nalgebra::{Cholesky, DMatrix, DVector, Dynamic};

mod multivariate_normal;
Expand Down Expand Up @@ -398,18 +400,18 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
/// It runs for a maximum of `max_iter` iterations and stops prematurely if all gradients are below `convergence_fraction` times their associated parameter
/// or if it runs for more than `max_time`.
///
/// Good default values for `max_iter`, `convergence_fraction` and `max_time` are `100`, `0.05` and `std::time::Duration::from_secs(3600)` (one hour)
/// Good default values for `max_iter`, `convergence_fraction` and `max_time` are `100`, `0.05` and `chrono::Duration::seconds(3600)` (one hour)
///
/// Note that, if the `noise` parameter ends up unnaturally large after the fit, it is a good sign that the kernel is unadapted to the data.
pub fn fit_parameters(&mut self,
fit_prior: bool,
fit_kernel: bool,
max_iter: usize,
convergence_fraction: f64,
max_time: std::time::Duration)
{
if fit_prior
{
pub fn fit_parameters(
&mut self,
fit_prior: bool,
fit_kernel: bool,
max_iter: usize,
convergence_fraction: f64,
max_time: Duration,
) {
if fit_prior {
// Gets the original data back in order to update the prior.
let training_outputs =
self.training_outputs.as_vector() + self.prior.prior(&self.training_inputs.as_matrix());
Expand Down
15 changes: 9 additions & 6 deletions src/gaussian_process/optimizer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@
//!
//! Otherwise we fit the noise in log-scale as its magnitude matters more than its precise value.

use chrono::{Duration, Utc};

use super::GaussianProcess;
use crate::algebra::{make_cholesky_cov_matrix, make_gradient_covariance_matrices};
use crate::parameters::{kernel::Kernel, prior::Prior};
Expand Down Expand Up @@ -67,7 +69,7 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
pub(super) fn optimize_parameters(&mut self,
max_iter: usize,
convergence_fraction: f64,
max_time: std::time::Duration)
max_time: Duration)
{
// use the ADAM gradient descent algorithm
// see [optimizing-gradient-descent](https://ruder.io/optimizing-gradient-descent/)
Expand Down Expand Up @@ -97,7 +99,7 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
let mut mean_grad = vec![0.; parameters.len()];
let mut var_grad = vec![0.; parameters.len()];

let time_start = std::time::Instant::now();
let time_start = Utc::now();
for i in 1..=max_iter
{
let mut gradients = self.gradient_marginal_likelihood();
Expand Down Expand Up @@ -133,7 +135,7 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
self.noise,
self.cholesky_epsilon);

if (!had_significant_progress) || (time_start.elapsed() > max_time)
if (!had_significant_progress) || (Utc::now().signed_duration_since(time_start) > max_time)
{
//println!("Iterations:{}", i);
break;
Expand Down Expand Up @@ -209,7 +211,7 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
pub(super) fn scaled_optimize_parameters(&mut self,
max_iter: usize,
convergence_fraction: f64,
max_time: std::time::Duration)
max_time: Duration)
{
// use the ADAM gradient descent algorithm
// see [optimizing-gradient-descent](https://ruder.io/optimizing-gradient-descent/)
Expand Down Expand Up @@ -238,7 +240,7 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
let mut mean_grad = vec![0.; parameters.len()];
let mut var_grad = vec![0.; parameters.len()];

let time_start = std::time::Instant::now();
let time_start = Utc::now();
for i in 1..=max_iter
{
let (scale, gradients) = self.scaled_gradient_marginal_likelihood();
Expand Down Expand Up @@ -267,7 +269,7 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
self.noise,
self.cholesky_epsilon);

if (!had_significant_progress) || (time_start.elapsed() > max_time)
if (!had_significant_progress) || (Utc::now().signed_duration_since(time_start) > max_time)
{
//println!("Iterations:{}", i);
break;
Expand All @@ -280,3 +282,4 @@ impl<KernelType: Kernel, PriorType: Prior> GaussianProcess<KernelType, PriorType
self.noise);*/
}
}

4 changes: 3 additions & 1 deletion src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ mod conversion;
mod gaussian_process;
mod parameters;

use chrono::Duration;

use crate::gaussian_process::GaussianProcess;

fn main()
Expand Down Expand Up @@ -32,7 +34,7 @@ fn main()
let fit_kernel = true;
let max_iter = 100;
let convergence_fraction = 0.05;
let max_time = std::time::Duration::from_secs(3600);
let max_time = Duration::seconds(3600);
gp.add_samples(&additional_inputs, &additional_outputs);
gp.fit_parameters(fit_prior, fit_kernel, max_iter, convergence_fraction, max_time);
println!("model is now updated.");
Expand Down

0 comments on commit 6a3bb02

Please sign in to comment.