Added more examples for problems and algorithms.
yurytsoy committed Aug 7, 2017
1 parent aab0b5e commit 7cad1cc
Showing 7 changed files with 158 additions and 35 deletions.
19 changes: 8 additions & 11 deletions README.md
@@ -60,8 +60,8 @@ assert!(bs.len() == 3); // number of elements equals to number of hidden layers

 ```rust
 // Dummy problem returning random fitness.
-pub struct SphereProblem;
-impl Problem for SphereProblem {
+pub struct DummyProblem;
+impl Problem for DummyProblem {
     // Function to evaluate a specific individual.
     fn compute<T: Individual>(&self, ind: &mut T) -> f32 {
         // use `to_vec` to get real-coded representation of an individual.
@@ -78,32 +78,28 @@ impl Problem for SphereProblem {
 ```rust
 // Dummy problem returning random fitness.
 struct RandomNEProblem {}

 impl RandomNEProblem {
     fn new() -> RandomNEProblem {
         RandomNEProblem{}
     }
 }

 impl NeuroProblem for RandomNEProblem {
     // return number of NN inputs.
-    fn get_inputs_count(&self) -> usize {1}
+    fn get_inputs_num(&self) -> usize {1}
     // return number of NN outputs.
-    fn get_outputs_count(&self) -> usize {1}
-    // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future as I get more hang of Rust.
+    fn get_outputs_num(&self) -> usize {1}
+    // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future.
     fn get_default_net(&self) -> MultilayeredNetwork {
         let mut rng = rand::thread_rng();
-        let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_count(), self.get_outputs_count());
+        let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
         net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
             .build(&mut rng, NeuralArchitecture::Multilayered);
         net
     }

     // Function to evaluate performance of a given NN.
     fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
         let mut rng: StdRng = StdRng::from_seed(&[0]);

-        let mut input = (0..self.get_inputs_count())
+        let mut input = (0..self.get_inputs_num())
             .map(|_| rng.gen::<f32>())
             .collect::<Vec<f32>>();
         // compute NN output using random input.
@@ -112,4 +108,5 @@ impl NeuroProblem for RandomNEProblem {
     }
 }

+
 ```
2 changes: 0 additions & 2 deletions examples/ga.rs
@@ -1,8 +1,6 @@
 extern crate rand;
 extern crate revonet;

-//use rand::{Rng, StdRng, SeedableRng};
-
 use revonet::ea::*;
 use revonet::ga::*;
 use revonet::problem::*;
23 changes: 23 additions & 0 deletions src/ga.rs
@@ -10,6 +10,29 @@ use settings::*;


 /// Baseline structure for [Genetic Algorithm](https://en.wikipedia.org/wiki/Genetic_algorithm)
+///
+/// # Example: Running GA to solve a minimization problem:
+/// ```
+/// extern crate rand;
+/// extern crate revonet;
+///
+/// use revonet::ea::*;
+/// use revonet::ga::*;
+/// use revonet::problem::*;
+/// use revonet::settings::*;
+///
+/// fn main() {
+///     let pop_size = 20u32;
+///     let problem_dim = 10u32;
+///     let problem = SphereProblem{};
+///
+///     let gen_count = 10u32;
+///     let settings = EASettings::new(pop_size, gen_count, problem_dim);
+///     let mut ga: GA<SphereProblem> = GA::new(&problem);
+///     let res = ga.run(settings).expect("Error during GA run");
+///     println!("\n\nGA results: {:?}", res);
+/// }
+/// ```
 pub struct GA<'a, P: Problem + 'a> {
     /// Context structure containing information about GA run, its progress and results.
     ctx: Option<EAContext<RealCodedIndividual>>,
22 changes: 22 additions & 0 deletions src/ne.rs
@@ -119,6 +119,28 @@ impl Individual for NEIndividual {
 //================================================================================

 /// Structure for neuroevolutionary algorithm.
+///
+/// # Example: Run a neuroevolutionary algorithm to solve the XOR problem.
+/// ```
+/// extern crate revonet;
+///
+/// use revonet::ea::*;
+/// use revonet::ne::*;
+/// use revonet::neproblem::*;
+/// use revonet::settings::*;
+///
+/// fn main() {
+///     let (pop_size, gen_count, param_count) = (20, 50, 100); // param_count does not matter here as the NN structure is defined by the problem.
+///     let settings = EASettings::new(pop_size, gen_count, param_count);
+///     let problem = XorProblem::new();
+///
+///     let mut ne: NE<XorProblem> = NE::new(&problem);
+///     let res = ne.run(settings).expect("Error: NE result is empty");
+///     println!("result: {:?}", res);
+///     println!("\nbest individual: {:?}", res.best);
+/// }
+/// ```
+
 pub struct NE<'a, P: Problem + 'a> {
     /// Context structure containing information about GA run, its progress and results.
     ctx: Option<EAContext<NEIndividual>>,
51 changes: 51 additions & 0 deletions src/neproblem.rs
@@ -9,6 +9,57 @@ use problem::*;
 //--------------------------------------------

 /// Trait for problem where NN is a solution.
+///
+/// # Example: Custom NE problem
+/// ```
+/// extern crate revonet;
+/// extern crate rand;
+///
+/// use rand::{Rng, SeedableRng, StdRng};
+///
+/// use revonet::ea::*;
+/// use revonet::ne::*;
+/// use revonet::neuro::*;
+/// use revonet::neproblem::*;
+///
+/// // Dummy problem returning random fitness.
+/// struct RandomNEProblem {}
+///
+/// impl RandomNEProblem {
+///     fn new() -> RandomNEProblem {
+///         RandomNEProblem{}
+///     }
+/// }
+///
+/// impl NeuroProblem for RandomNEProblem {
+///     // return number of NN inputs.
+///     fn get_inputs_num(&self) -> usize {1}
+///     // return number of NN outputs.
+///     fn get_outputs_num(&self) -> usize {1}
+///     // return NN with random weights and a fixed structure. For now the structure should be the same all the time to make sure that crossover is possible. Likely to change in the future.
+///     fn get_default_net(&self) -> MultilayeredNetwork {
+///         let mut rng = rand::thread_rng();
+///         let mut net: MultilayeredNetwork = MultilayeredNetwork::new(self.get_inputs_num(), self.get_outputs_num());
+///         net.add_hidden_layer(5 as usize, ActivationFunctionType::Sigmoid)
+///             .build(&mut rng, NeuralArchitecture::Multilayered);
+///         net
+///     }
+///
+///     // Function to evaluate performance of a given NN.
+///     fn compute_with_net<T: NeuralNetwork>(&self, nn: &mut T) -> f32 {
+///         let mut rng: StdRng = StdRng::from_seed(&[0]);
+///
+///         let mut input = (0..self.get_inputs_num())
+///             .map(|_| rng.gen::<f32>())
+///             .collect::<Vec<f32>>();
+///         // compute NN output using random input.
+///         let mut output = nn.compute(&input);
+///         output[0]
+///     }
+/// }
+///
+/// fn main() {}
+/// ```
 pub trait NeuroProblem: Problem {
     /// Number of input variables.
     fn get_inputs_num(&self) -> usize;
44 changes: 23 additions & 21 deletions src/neuro.rs
@@ -108,25 +108,10 @@ impl MultilayeredNetwork {
     /// * `size` - number of nodes in a layer.
     /// * `actf` - type of activation function.
     pub fn add_hidden_layer(&mut self, size: usize, actf: ActivationFunctionType) -> &mut Self {
-        self.add_layer(size, actf, false)
-    }
-
-    /// Add a hidden layer with bypass (skip) connections, so that its output also contains input signals.
-    ///
-    /// Panics if the network has already been initialized .
-    ///
-    /// # Arguments:
-    /// * `size` - number of nodes in a layer.
-    /// * `actf` - type of activation function.
-    pub fn add_hidden_bypass_layer(&mut self, size: usize, actf: ActivationFunctionType) -> &mut Self {
-        self.add_layer(size, actf, true)
-    }
-
-    fn add_layer(&mut self, size: usize, actf: ActivationFunctionType, is_bypass: bool) -> &mut Self {
         if self.is_built {
             panic!("Can not add layer to already built network.");
         }
-        self.layers.push(Box::new(NeuralLayer::new(size, actf, is_bypass)));
+        self.layers.push(Box::new(NeuralLayer::new(size, actf)));
         self
     }

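With the bypass variant and the private `add_layer` helper removed, hidden layers now go through `add_hidden_layer` alone. A minimal usage sketch of the simplified builder (names and the panic message come from this diff; the layer sizes and counts are arbitrary):

```rust
extern crate rand;
extern crate revonet;

use revonet::neuro::*;

fn main() {
    let mut rng = rand::thread_rng();
    // Network with 2 inputs and 1 output; hidden layers are chained before `build`.
    let mut net = MultilayeredNetwork::new(2, 1);
    net.add_hidden_layer(5usize, ActivationFunctionType::Sigmoid)
        .add_hidden_layer(3usize, ActivationFunctionType::Relu)
        .build(&mut rng, NeuralArchitecture::Multilayered);
    // Any further `add_hidden_layer` call would panic:
    // "Can not add layer to already built network."
}
```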
@@ -142,7 +127,7 @@ impl MultilayeredNetwork {
         self.arch = arch;

         // add output layer.
-        self.layers.push(Box::new(NeuralLayer::new(self.outputs_num, ActivationFunctionType::Linear, false)));
+        self.layers.push(Box::new(NeuralLayer::new(self.outputs_num, ActivationFunctionType::Linear)));

         // init weights and biases for all layers.
         let mut inputs = self.inputs_num;
@@ -314,8 +299,6 @@ pub struct NeuralLayer {
     outputs: Vec<f32>,
     /// Type of activation function for every node in the layer.
     activation: ActivationFunctionType,
-    /// Indicates whether the layer implements skip connections to propagate input signals to output.
-    is_bypass: bool,
 }

 #[allow(dead_code)]
@@ -325,15 +308,14 @@ impl NeuralLayer {
     /// # Arguments:
     /// * `size` - number of nodes.
     /// * `actf` - type of activation function.
-    pub fn new(size: usize, actf: ActivationFunctionType, is_bypass: bool) -> NeuralLayer {
+    pub fn new(size: usize, actf: ActivationFunctionType) -> NeuralLayer {
         NeuralLayer{
             size: size,
             inputs_num: 0usize,
             weights: Vec::new(),
             biases: Vec::new(),
             outputs: Vec::new(),
             activation: actf,
-            is_bypass: is_bypass,
         }
     }

@@ -411,9 +393,23 @@ pub enum ActivationFunctionType {
     Relu,
 }

+/// Trait to generalize behaviour of activation function.
 pub trait ActivationFunction: Debug {
+    /// Compute activation function value using given argument.
+    /// Implementation should call `ActivationFunction::compute_static` to avoid duplication.
+    ///
+    /// # Arguments:
+    /// * `x` - value of argument of activation function.
     fn compute(&self, x: f32) -> f32;
+
+    /// Compute activation function value using given argument.
+    /// This is a static variant of the function `compute`.
+    ///
+    /// # Arguments:
+    /// * `x` - value of argument of activation function.
     fn compute_static(x: f32) -> f32;
+
+    /// Construct an activation function instance.
     fn new() -> Self;
 }

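A hedged sketch of a custom implementation of this trait, mirroring the `ReluActivation` impl below; the `TanhActivation` type is hypothetical and not part of this commit:

```rust
// Assumes the surrounding module, where `ActivationFunction` is in scope.
#[derive(Debug)]
pub struct TanhActivation;

impl ActivationFunction for TanhActivation {
    // The instance method delegates to the static variant, as the doc comment above advises.
    fn compute(&self, x: f32) -> f32 { TanhActivation::compute_static(x) }
    // The static variant carries the actual math.
    fn compute_static(x: f32) -> f32 { x.tanh() }
    fn new() -> Self { TanhActivation }
}
```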
@@ -450,6 +446,12 @@ impl ActivationFunction for ReluActivation {
     }
 }

+/// Computes activation function outputs for an array of arguments.
+/// The result is written into the input array.
+///
+/// # Arguments:
+/// * `xs` - array of arguments for the activation function.
+/// * `actf` - element of `ActivationFunctionType` denoting the type of activation.
 pub fn compute_activations_inplace(xs: &mut [f32], actf: ActivationFunctionType) {
     let actf_ptr: fn(f32) -> f32;
     match actf {
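A short usage sketch for this helper, assuming it is re-exported through `revonet::neuro` like the rest of this module:

```rust
extern crate revonet;

use revonet::neuro::*;

fn main() {
    let mut xs = vec![-1.0f32, 0.0, 1.0];
    // Overwrites every element of `xs` in place with its sigmoid value.
    compute_activations_inplace(&mut xs, ActivationFunctionType::Sigmoid);
    println!("{:?}", xs);
}
```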
32 changes: 31 additions & 1 deletion src/problem.rs
@@ -5,7 +5,37 @@ use ea::*;

 /// Represents baseline interface for the objective function.
 ///
-/// By default solution is a vector of real-numbers.
+/// By default solution is represented by a vector of real-numbers.
+///
+/// # Example: Custom optimization problem
+/// ```
+/// extern crate revonet;
+/// extern crate rand;
+///
+/// use rand::{Rng, SeedableRng, StdRng};
+///
+/// use revonet::ea::*;
+/// use revonet::problem::*;
+///
+/// // Dummy problem returning random fitness.
+/// pub struct DummyProblem;
+///
+/// impl Problem for DummyProblem {
+///     // Function to evaluate a specific individual.
+///     fn compute<T: Individual>(&self, ind: &mut T) -> f32 {
+///         // use `to_vec` to get real-coded representation of an individual.
+///         let v = ind.to_vec().unwrap();
+///
+///         // Perform calculations as per the optimization problem being implemented.
+///         // Here just a random value is returned.
+///         let mut rng: StdRng = StdRng::from_seed(&[0]);
+///         rng.gen::<f32>()
+///     }
+/// }
+///
+/// fn main() {}
+/// ```
+
 pub trait Problem{
     /// Returns whether given fitness value is enough to be a solution.
     ///
