Merged documentation and fixes for integration into internal projects.
ralfbiedert committed Aug 15, 2018
1 parent 60934db commit ac12c52
Showing 22 changed files with 571 additions and 807 deletions.
3 changes: 2 additions & 1 deletion Cargo.toml
@@ -21,7 +21,8 @@ maintenance = { status = "experimental" }


[dependencies]
simd_aligned = "0.1.1"
# simd_aligned = "0.1.1"
simd_aligned = { path = "../simd_aligned" }
packed_simd = "0.1"
rand = "0.5"
pest = "1.0"
4 changes: 2 additions & 2 deletions README.md
@@ -22,7 +22,7 @@ You trained an SVM using [libSVM](https://github.com/cjlin1/libsvm), now you want
* free of `unsafe` code ;)


- # Principal Usage
+ # Usage

Train with [libSVM](https://github.com/cjlin1/libsvm) (e.g., using the tool `svm-train`), then classify with `ffsvm-rust`.

@@ -42,7 +42,7 @@ features[3] = -0.221184;

svm.predict_value(&mut problem)?;

- assert_eq!(problem.result(), Outcome::Label(42));
+ assert_eq!(problem.solution(), Solution::Label(42));
```
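Pieced together from the fragments above and the `examples/basic.rs` change below, a complete post-rename usage sketch might look as follows. This is a reconstruction, not code from this commit: the feature values at indices 0–2 are collapsed in the hunk, so the ones here are placeholders.

```rust
use ffsvm::{DenseSVM, Error, Predict, Problem, SAMPLE_MODEL};
use std::convert::TryFrom;

fn main() -> Result<(), Error> {
    // Load a model trained by libSVM's `svm-train`.
    let svm = DenseSVM::try_from(SAMPLE_MODEL)?;

    let mut problem = Problem::from(&svm);
    let features = problem.features();

    // Placeholder inputs; only `features[3]` is visible in the hunk above.
    features[0] = 0.0;
    features[1] = 0.0;
    features[2] = 0.0;
    features[3] = -0.221184;

    svm.predict_value(&mut problem)?;

    // After the rename, results are read via `solution()` instead of `result()`.
    println!("{:?}", problem.solution());
    Ok(())
}
```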

From C / FFI:
6 changes: 3 additions & 3 deletions benches/svm_dense.rs
@@ -4,23 +4,23 @@ extern crate ffsvm;
extern crate test;

mod svm_dense {
- use ffsvm::{DenseSVM, Linear, ModelFile, Poly, Predict, Problem, Rbf, SVMType, Sigmoid};
+ use ffsvm::{DenseSVM, ModelFile, Predict, Problem};
use std::convert::TryFrom;
use test::Bencher;

/// Produces a test case run for benchmarking
#[allow(dead_code)]
fn produce_testcase(svm_type: &str, kernel_type: &str, total_sv: u32, num_attributes: u32) -> impl FnMut() {
let raw_model = ModelFile::random_dense(svm_type, kernel_type, total_sv, num_attributes);
- let mut svm = DenseSVM::try_from(&raw_model).unwrap();
+ let svm = DenseSVM::try_from(&raw_model).unwrap();
let mut problem = Problem::from(&svm);
let problem_mut = problem.features().as_slice_mut();

for i in 0 .. num_attributes {
problem_mut[i as usize] = i as f32;
}

- move || (&mut svm).predict_value(&mut problem).expect("This should work")
+ move || (&svm).predict_value(&mut problem).expect("This should work")
}

// RBF
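The `#[bench]` bodies themselves are collapsed above. A sketch of how one might drive the closure returned by `produce_testcase`; the bench name and the `"c_svc"` / `"rbf"` string arguments are assumptions based on libSVM's model-file vocabulary, not taken from this diff:

```rust
// Hypothetical bench driving the helper above; the real benches are collapsed.
#[bench]
fn predict_dense_rbf_1024sv_16attr(b: &mut Bencher) {
    let mut predict = produce_testcase("c_svc", "rbf", 1024, 16);
    b.iter(|| predict());
}
```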
4 changes: 2 additions & 2 deletions benches/svm_sparse.rs
@@ -4,7 +4,7 @@ extern crate ffsvm;
extern crate test;

mod svm_sparse {
- use ffsvm::{Linear, ModelFile, Poly, Predict, Problem, Rbf, SVMType, Sigmoid, SparseSVM};
+ use ffsvm::{ModelFile, Predict, Problem, SparseSVM};
use std::convert::TryFrom;
use test::Bencher;

@@ -20,7 +20,7 @@ mod svm_sparse {
problem_mut[i as usize] = i as f32;
}

- move || (&mut svm).predict_value(&mut problem).expect("This should work")
+ move || (&svm).predict_value(&mut problem).expect("This should work")
}

// RBF
4 changes: 2 additions & 2 deletions docs/performance.md
@@ -2,7 +2,7 @@

# Performance vs. LibSVM

- Benchmarks are a tricky thing, but for classifying dense RBF-C-SVMs `ffsvm` should be between 2.5x and 14x faster than `libSVM` on reasonably modern x86 CPUs (supporting AVX2).
+ Benchmarks are a tricky thing, but for classifying dense SVMs `ffsvm` should be between 2.5x and 14x faster than `libSVM` on reasonably modern x86 CPUs (supporting AVX2).

![performance](performance_absolute.v3.png)

@@ -14,7 +14,7 @@ There are 3 major factors contributing to this:

* no allocation during classification
* cache-friendly memory layout
- * usage of SIMD / AVX
+ * usage of SIMD

In addition, ffsvm mixes seamlessly with Rayon for _batch classification_, providing even higher speed-ups if you classify more than one problem at a time.
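A minimal sketch of what such a batch setup might look like, not code from this commit: it assumes `rayon` as an extra dependency, reuses `SAMPLE_MODEL` from the examples, and assumes `DenseSVM` is `Sync` (implied by the benches above taking `&svm`):

```rust
use ffsvm::{DenseSVM, Error, Predict, Problem, SAMPLE_MODEL};
use rayon::prelude::*;
use std::convert::TryFrom;

fn main() -> Result<(), Error> {
    let svm = DenseSVM::try_from(SAMPLE_MODEL)?;

    // One Problem per item to classify; features would be filled in per item.
    let mut problems: Vec<_> = (0 .. 1024).map(|_| Problem::from(&svm)).collect();

    // Rayon spreads the problems across threads; the SVM itself is only read.
    problems.par_iter_mut().for_each(|problem| {
        svm.predict_value(problem).expect("classification should not fail");
    });

    Ok(())
}
```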

4 changes: 2 additions & 2 deletions examples/basic.rs
@@ -7,7 +7,7 @@ fn main() -> Result<(), Error> {
let svm = DenseSVM::try_from(SAMPLE_MODEL)?;

let mut problem = Problem::from(&svm);
- let mut features = problem.features();
+ let features = problem.features();

features[0] = 0.55838;
features[1] = -0.157895;
@@ -16,7 +16,7 @@

svm.predict_value(&mut problem)?;

- assert_eq!(problem.result(), Outcome::Label(12));
+ assert_eq!(problem.solution(), Solution::Label(12));

Ok(())
}
2 changes: 1 addition & 1 deletion src/errors.rs
@@ -40,7 +40,7 @@ pub enum Error {
/// If the model does not have a `degree` set this error may be raised.
NoDegree,

- /// Wrapper for [ModelError] when unifiying error handling.
+ /// Wrapper for internal parsing error when unifying error handling.
ParsingError(String),
}

6 changes: 3 additions & 3 deletions src/lib.rs
@@ -60,7 +60,7 @@
//!
//! svm.predict_value(&mut problem)?;
//!
- //! assert_eq!(problem.result(), Outcome::Label(42));
+ //! assert_eq!(problem.solution(), Solution::Label(42));
//!
//! Ok(())
//! }
@@ -86,9 +86,9 @@ pub use crate::{
parser::ModelFile,
svm::{
core::SVMCore,
- kernel::{KernelDense, Linear, Poly, Rbf, Sigmoid},
+ kernel::{KernelDense, KernelSparse, Linear, Poly, Rbf, Sigmoid},
predict::Predict,
- problem::{Outcome, Problem},
+ problem::{DenseProblem, Problem, Solution, SparseProblem},
DenseSVM, SVMType, SparseSVM,
},
};
31 changes: 14 additions & 17 deletions src/sparse.rs
@@ -1,28 +1,25 @@
- use std::{
-     collections::{btree_map::Iter, BTreeMap},
-     ops::{Index, IndexMut},
- };
+ use std::ops::{Index, IndexMut};

#[derive(Clone, Debug)]
struct Entry<T>
where
- T: Copy,
+ T: Copy + Clone + Default,
{
index: u32,
value: T,
}

- #[derive(Clone, Debug)]
+ #[derive(Clone, Debug, Default)]
pub struct SparseVector<T>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
entries: Vec<Entry<T>>,
}

impl<T> SparseVector<T>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
pub fn new() -> Self { SparseVector { entries: Vec::new() } }

@@ -35,7 +32,7 @@ where
#[derive(Clone, Debug)]
pub struct SparseVectorIter<'a, T: 'a>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
/// Reference to the matrix we iterate over.
crate vector: &'a SparseVector<T>,
@@ -46,7 +43,7 @@

impl<'a, T> Iterator for SparseVectorIter<'a, T>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
type Item = (u32, T);

@@ -64,14 +61,14 @@

impl<T> Index<usize> for SparseVector<T>
where
- T: Copy + Sized,
+ T: Copy + Sized + Default,
{
type Output = T;

fn index(&self, index: usize) -> &T {
// TODO: Beautify me

- for (i, e) in self.entries.iter().enumerate() {
+ for e in self.entries.iter() {
if e.index == index as u32 {
return &e.value;
}
@@ -111,14 +108,14 @@ where
#[derive(Clone, Debug)]
pub struct SparseMatrix<T>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
vectors: Vec<SparseVector<T>>,
}

impl<T> SparseMatrix<T>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
pub fn with(rows: usize) -> Self {
SparseMatrix {
@@ -134,7 +131,7 @@ where

impl<T> Index<(usize, usize)> for SparseMatrix<T>
where
- T: Copy + Sized,
+ T: Copy + Sized + Default,
{
type Output = T;

@@ -152,7 +149,7 @@ where
#[derive(Clone, Debug)]
pub struct SparseMatrixIter<'a, T: 'a>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
/// Reference to the matrix we iterate over.
crate matrix: &'a SparseMatrix<T>,
@@ -163,7 +160,7 @@ where

impl<'a, T> Iterator for SparseMatrixIter<'a, T>
where
- T: Clone + Copy,
+ T: Clone + Copy + Default,
{
type Item = &'a SparseVector<T>;

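The tail of the `Index` implementation is collapsed above, but the newly added `Default` bounds suggest that a lookup miss now yields `T::default()` rather than panicking. A hypothetical in-crate usage; these types are `crate`-visible, and the insert-on-write behavior of `IndexMut` is an assumption, not shown in this hunk:

```rust
let mut v: SparseVector<f32> = SparseVector::new();
v[3] = 1.5;                // assumed: IndexMut creates the entry for index 3
assert_eq!(v[3], 1.5);     // Index linearly scans `entries` for index 3
assert_eq!(v[7], 0.0);     // assumed: a missing index falls back to f32::default()
```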
2 changes: 1 addition & 1 deletion src/svm/class.rs
@@ -32,7 +32,7 @@ impl Class<SimdMatrix<f32s, RowOptimized>> {

impl Class<SparseMatrix<f32>> {
/// Creates a new class with the given parameters.
- pub fn with_parameters(classes: usize, support_vectors: usize, attributes: usize, label: u32) -> Class<SparseMatrix<f32>> {
+ pub fn with_parameters(classes: usize, support_vectors: usize, _attributes: usize, label: u32) -> Class<SparseMatrix<f32>> {
Class {
label,
num_support_vectors: support_vectors,
