diff --git a/README.md b/README.md
index 14e06ea..4036040 100644
--- a/README.md
+++ b/README.md
@@ -60,8 +60,8 @@ assert!(bs.len() == 3); // number of elements equals to number of hidden layers
 ```rust
 // Dummy problem returning random fitness.
-pub struct SphereProblem;
-impl Problem for SphereProblem {
+pub struct DummyProblem;
+impl Problem for DummyProblem {
     // Function to evaluate a specific individual.
     fn compute<T: Individual>(&self, ind: &mut T) -> f32 {
         // use `to_vec` to get real-coded representation of an individual.
@@ -78,32 +78,28 @@ impl Problem for SphereProblem {
 ```rust
 // Dummy problem returning random fitness.
 struct RandomNEProblem {}
-
 impl RandomNEProblem {
     fn new() -> RandomNEProblem {
         RandomNEProblem{}
     }
 }
-
 impl NeuroProblem for RandomNEProblem {
     // return number of NN inputs.
-    fn get_inputs_count(&self) -> usize {1}
+    fn get_inputs_num(&self) -> usize {1}
     // return number of NN outputs.
-    fn get_outputs_count(&self) -> usize {1}
-    // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future as I get more hang of Rust.
+    fn get_outputs_num(&self) -> usize {1}
+    // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future.
     fn get_default_net(&self) -> MultilayeredNetwork {
         let mut rng = rand::thread_rng();
-        let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_count(), self.get_outputs_count());
+        let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
         net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
             .build(&mut rng, NeuralArchitecture::Multilayered);
         net
     }
-
     // Function to evaluate performance of a given NN.
     fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
         let mut rng: StdRng = StdRng::from_seed(&[0]);
-
-        let mut input = (0..self.get_inputs_count())
+        let mut input = (0..self.get_inputs_num())
             .map(|_| rng.gen::<f32>())
             .collect::<Vec<f32>>();
         // compute NN output using random input.
@@ -112,4 +108,5 @@ impl NeuroProblem for RandomNEProblem {
     }
 }
+
 ```
diff --git a/examples/ga.rs b/examples/ga.rs
index a0a41b3..3db5a9d 100644
--- a/examples/ga.rs
+++ b/examples/ga.rs
@@ -1,8 +1,6 @@
 extern crate rand;
 extern crate revonet;
 
-//use rand::{Rng, StdRng, SeedableRng};
-
 use revonet::ea::*;
 use revonet::ga::*;
 use revonet::problem::*;
diff --git a/src/ga.rs b/src/ga.rs
index 719d5f8..7e2e95e 100644
--- a/src/ga.rs
+++ b/src/ga.rs
@@ -10,6 +10,29 @@ use settings::*;
 
 /// Baseline structure for [Genetic Algorithm](https://en.wikipedia.org/wiki/Genetic_algorithm)
+///
+/// # Example: Running GA to solve a minimization problem
+/// ```
+/// extern crate rand;
+/// extern crate revonet;
+///
+/// use revonet::ea::*;
+/// use revonet::ga::*;
+/// use revonet::problem::*;
+/// use revonet::settings::*;
+///
+/// fn main() {
+///     let pop_size = 20u32;
+///     let problem_dim = 10u32;
+///     let problem = SphereProblem{};
+///
+///     let gen_count = 10u32;
+///     let settings = EASettings::new(pop_size, gen_count, problem_dim);
+///     let mut ga: GA<SphereProblem> = GA::new(&problem);
+///     let res = ga.run(settings).expect("Error during GA run");
+///     println!("\n\nGA results: {:?}", res);
+/// }
+/// ```
 pub struct GA<'a, P: Problem + 'a> {
     /// Context structure containing information about GA run, its progress and results.
     ctx: Option<EAContext<RealCodedIndividual>>,
diff --git a/src/ne.rs b/src/ne.rs
index 8c2e630..84aa20e 100644
--- a/src/ne.rs
+++ b/src/ne.rs
@@ -119,6 +119,28 @@ impl Individual for NEIndividual {
 //================================================================================
 
 /// Structure for neuroevolutionary algorithm.
+///
+/// # Example: Run a neuroevolutionary algorithm to solve the XOR problem.
+/// ```
+/// extern crate revonet;
+///
+/// use revonet::ea::*;
+/// use revonet::ne::*;
+/// use revonet::neproblem::*;
+/// use revonet::settings::*;
+///
+/// fn main() {
+///     let (pop_size, gen_count, param_count) = (20, 50, 100); // param_count does not matter here as the NN structure is defined by the problem.
+///     let settings = EASettings::new(pop_size, gen_count, param_count);
+///     let problem = XorProblem::new();
+///
+///     let mut ne: NE<XorProblem> = NE::new(&problem);
+///     let res = ne.run(settings).expect("Error: NE result is empty");
+///     println!("result: {:?}", res);
+///     println!("\nbest individual: {:?}", res.best);
+/// }
+/// ```
+
 pub struct NE<'a, P: Problem + 'a> {
     /// Context structure containing information about GA run, its progress and results.
     ctx: Option<EAContext<NEIndividual>>,
diff --git a/src/neproblem.rs b/src/neproblem.rs
index be006b7..12b272d 100644
--- a/src/neproblem.rs
+++ b/src/neproblem.rs
@@ -9,6 +9,57 @@ use problem::*;
 //--------------------------------------------
 
 /// Trait for problem where NN is a solution.
+///
+/// # Example: Custom NE problem
+/// ```
+/// extern crate revonet;
+/// extern crate rand;
+///
+/// use rand::{Rng, SeedableRng, StdRng};
+///
+/// use revonet::ea::*;
+/// use revonet::ne::*;
+/// use revonet::neuro::*;
+/// use revonet::neproblem::*;
+///
+/// // Dummy problem returning random fitness.
+/// struct RandomNEProblem {}
+///
+/// impl RandomNEProblem {
+///     fn new() -> RandomNEProblem {
+///         RandomNEProblem{}
+///     }
+/// }
+///
+/// impl NeuroProblem for RandomNEProblem {
+///     // return number of NN inputs.
+///     fn get_inputs_num(&self) -> usize {1}
+///     // return number of NN outputs.
+///     fn get_outputs_num(&self) -> usize {1}
+///     // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future.
+///     fn get_default_net(&self) -> MultilayeredNetwork {
+///         let mut rng = rand::thread_rng();
+///         let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
+///         net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
+///             .build(&mut rng, NeuralArchitecture::Multilayered);
+///         net
+///     }
+///
+///     // Function to evaluate performance of a given NN.
+///     fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
+///         let mut rng: StdRng = StdRng::from_seed(&[0]);
+///
+///         let mut input = (0..self.get_inputs_num())
+///             .map(|_| rng.gen::<f32>())
+///             .collect::<Vec<f32>>();
+///         // compute NN output using random input.
+///         let mut output = nn.compute(&input);
+///         output[0]
+///     }
+/// }
+///
+/// fn main() {}
+/// ```
 pub trait NeuroProblem: Problem {
     /// Number of input variables.
     fn get_inputs_num(&self) -> usize;
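Note: a minimal sketch of how the `NeuroProblem` methods documented above fit together outside of an evolutionary run. It reuses the `XorProblem` from the `src/ne.rs` example and assumes `MultilayeredNetwork` implements the crate's `NeuralNetwork` trait, so the default net can be scored directly:

```rust
extern crate revonet;

use revonet::neproblem::*;
use revonet::neuro::*;

fn main() {
    // Build the problem-defined network and evaluate it once,
    // the same way NE scores each individual's phenotype.
    let problem = XorProblem::new();
    let mut net = problem.get_default_net();
    let fitness = problem.compute_with_net(&mut net);
    println!("fitness of an untrained network: {}", fitness);
}
```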
diff --git a/src/neuro.rs b/src/neuro.rs
index 96ae8d6..cd40492 100644
--- a/src/neuro.rs
+++ b/src/neuro.rs
@@ -108,25 +108,10 @@ impl MultilayeredNetwork {
     /// * `size` - number of nodes in a layer.
     /// * `actf` - type of activation function.
     pub fn add_hidden_layer(&mut self, size: usize, actf: ActivationFunctionType) -> &mut Self {
-        self.add_layer(size, actf, false)
-    }
-
-    /// Add a hidden layer with bypass (skip) connections, so that its output also contains input signals.
-    ///
-    /// Panics if the network has already been initialized .
-    ///
-    /// # Arguments:
-    /// * `size` - number of nodes in a layer.
-    /// * `actf` - type of activation function.
-    pub fn add_hidden_bypass_layer(&mut self, size: usize, actf: ActivationFunctionType) -> &mut Self {
-        self.add_layer(size, actf, true)
-    }
-
-    fn add_layer(&mut self, size: usize, actf: ActivationFunctionType, is_bypass: bool) -> &mut Self {
         if self.is_built {
             panic!("Can not add layer to already built network.");
         }
-        self.layers.push(Box::new(NeuralLayer::new(size, actf, is_bypass)));
+        self.layers.push(Box::new(NeuralLayer::new(size, actf)));
         self
     }
 
@@ -142,7 +127,7 @@ impl MultilayeredNetwork {
         self.arch = arch;
 
         // add output layer.
-        self.layers.push(Box::new(NeuralLayer::new(self.outputs_num, ActivationFunctionType::Linear, false)));
+        self.layers.push(Box::new(NeuralLayer::new(self.outputs_num, ActivationFunctionType::Linear)));
 
         // init weights and biases for all layers.
         let mut inputs = self.inputs_num;
@@ -314,8 +299,6 @@ pub struct NeuralLayer {
     outputs: Vec<f32>,
     /// Type of activation function for every node in the layer.
     activation: ActivationFunctionType,
-    /// Indicates whether the layer implements skip connections to propagate input signals to output.
-    is_bypass: bool,
 }
 
 #[allow(dead_code)]
@@ -325,7 +308,7 @@ impl NeuralLayer {
     /// # Arguments:
     /// * `size` - number of nodes.
     /// * `actf` - type of activation function.
-    pub fn new(size: usize, actf: ActivationFunctionType, is_bypass: bool) -> NeuralLayer {
+    pub fn new(size: usize, actf: ActivationFunctionType) -> NeuralLayer {
         NeuralLayer{
             size: size,
             inputs_num: 0usize,
@@ -333,7 +316,6 @@ impl NeuralLayer {
             biases: Vec::new(),
             outputs: Vec::new(),
             activation: actf,
-            is_bypass: is_bypass,
         }
     }
 
@@ -411,9 +393,23 @@ pub enum ActivationFunctionType {
     Relu,
 }
 
+/// Trait to generalize behaviour of activation function.
 pub trait ActivationFunction: Debug {
+    /// Compute activation function value using given argument.
+    /// Implementation should call `ActivationFunction::compute_static` to avoid duplication.
+    ///
+    /// # Arguments:
+    /// * `x` - value of argument of activation function.
     fn compute(&self, x: f32) -> f32;
+
+    /// Compute activation function value using given argument.
+    /// This is a static variant of the function `compute`.
+    ///
+    /// # Arguments:
+    /// * `x` - value of argument of activation function.
     fn compute_static(x: f32) -> f32;
+
+    /// Construct an activation function instance.
     fn new() -> Self;
 }
 
@@ -450,6 +446,12 @@ impl ActivationFunction for ReluActivation {
     }
 }
 
+/// Computes activation function outputs by using array of arguments.
+/// The result is written into the input array.
+///
+/// # Arguments:
+/// * `xs` - array of arguments for vector of activation functions.
+/// * `actf` - element of the `ActivationFunctionType` denoting type of activation.
 pub fn compute_activations_inplace(xs: &mut [f32], actf: ActivationFunctionType) {
     let actf_ptr: fn(f32) -> f32;
     match actf {
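Note: the documented `ActivationFunction` contract (the instance method `compute` delegating to `compute_static`) and the `compute_activations_inplace` helper can be exercised as below. `TanhActivation` is a hypothetical type used only for illustration; this patch does not add it:

```rust
extern crate revonet;

use revonet::neuro::*;

// Hypothetical activation; follows the documented contract that
// `compute` should call `compute_static` to avoid duplication.
#[derive(Debug)]
struct TanhActivation;

impl ActivationFunction for TanhActivation {
    fn compute(&self, x: f32) -> f32 { TanhActivation::compute_static(x) }
    fn compute_static(x: f32) -> f32 { x.tanh() }
    fn new() -> TanhActivation { TanhActivation }
}

fn main() {
    let f = TanhActivation::new();
    assert!(f.compute(0.0).abs() < 1e-6);

    // The free function applies one of the built-in activations
    // to every element of the slice, writing results in place.
    let mut xs = vec![-1.0f32, 0.0, 2.5];
    compute_activations_inplace(&mut xs, ActivationFunctionType::Relu);
    assert_eq!(xs, vec![0.0, 0.0, 2.5]);
}
```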
diff --git a/src/problem.rs b/src/problem.rs
index 940f907..eaac9d8 100644
--- a/src/problem.rs
+++ b/src/problem.rs
@@ -5,7 +5,37 @@ use ea::*;
 
 /// Represents baseline interface for the objective function.
 ///
-/// By default solution is a vector of real-numbers.
+/// By default, a solution is represented by a vector of real numbers.
+///
+/// # Example: Custom optimization problem
+/// ```
+/// extern crate revonet;
+/// extern crate rand;
+///
+/// use rand::{Rng, SeedableRng, StdRng};
+///
+/// use revonet::ea::*;
+/// use revonet::problem::*;
+///
+/// // Dummy problem returning random fitness.
+/// pub struct DummyProblem;
+///
+/// impl Problem for DummyProblem {
+///     // Function to evaluate a specific individual.
+///     fn compute<T: Individual>(&self, ind: &mut T) -> f32 {
+///         // use `to_vec` to get real-coded representation of an individual.
+///         let v = ind.to_vec().unwrap();
+///
+///         // Perform calculations as per the optimization problem being implemented.
+///         // Here just a random value is returned.
+///         let mut rng: StdRng = StdRng::from_seed(&[0]);
+///         rng.gen::<f32>()
+///     }
+/// }
+///
+/// fn main() {}
+/// ```
+
 pub trait Problem{
     /// Returns whether given fitness value is enough to be a solution.
     ///
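Note: since `DummyProblem` returns random fitness, a sketch of a non-trivial objective under the same `Problem` trait may be useful. `MySphere` is a hypothetical name (the crate ships its own `SphereProblem`, as the `src/ga.rs` example shows), and `to_vec` is assumed to yield the real-coded genes as in the doc example above:

```rust
extern crate revonet;

use revonet::ea::*;
use revonet::problem::*;

// Hypothetical problem: minimize the sum of squares of the genes.
pub struct MySphere;

impl Problem for MySphere {
    fn compute<T: Individual>(&self, ind: &mut T) -> f32 {
        // `to_vec` yields the real-coded representation of the individual.
        let v = ind.to_vec().unwrap();
        // Sum of squares: global minimum of 0 at the origin.
        v.iter().fold(0f32, |acc, x| acc + x * x)
    }
}

fn main() {}
```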