Skip to content

Commit

Permalink
Merge a5a092a into 493e046
Browse files Browse the repository at this point in the history
  • Loading branch information
ajschumacher committed Jun 9, 2015
2 parents 493e046 + a5a092a commit fcd9e5c
Show file tree
Hide file tree
Showing 2 changed files with 131 additions and 5 deletions.
89 changes: 84 additions & 5 deletions src/simple_statistics.js
Expand Up @@ -254,6 +254,83 @@
return bayes_model;
}


// # [Perceptron Classifier](http://en.wikipedia.org/wiki/Perceptron)
//
// This is a single-layer perceptron classifier that takes
// arrays of numbers and predicts whether they should be classified
// as either 0 or 1 (negative or positive examples).
// # [Perceptron Classifier](http://en.wikipedia.org/wiki/Perceptron)
//
// This is a single-layer perceptron classifier that takes
// arrays of numbers and predicts whether they should be classified
// as either 0 or 1 (negative or positive examples).
function perceptron() {
    var perceptron_model = {},
        // The weights, or coefficients of the model;
        // weights are only populated when training with data.
        weights = [],
        // The bias term, or intercept; it is also a weight but
        // it's stored separately for convenience as it is always
        // multiplied by one.
        bias = 0;

    // ## Predict
    // Use an array of features with the weight array and bias
    // to predict whether an example is labeled 0 or 1.
    // Returns `null` if the model has not been trained on
    // feature arrays of this length.
    perceptron_model.predict = function(features) {
        // Only predict if previously trained
        // on the same size feature array(s).
        if (features.length !== weights.length) return null;
        // Calculate the sum of features times weights,
        // with the bias added (implicitly times one).
        var score = 0;
        for (var i = 0; i < weights.length; i++) {
            score += weights[i] * features[i];
        }
        score += bias;
        // Classify as 1 if the score is over 0, otherwise 0.
        return score > 0 ? 1 : 0;
    };

    // ## Train
    // Train the classifier with a new example, which is
    // a numeric array of features and a 0 or 1 label.
    // Returns the model for chaining, or `null` for an
    // invalid label.
    perceptron_model.train = function(features, label) {
        // Require that only labels of 0 or 1 are considered.
        if (label !== 0 && label !== 1) return null;
        // The length of the feature array determines
        // the length of the weight array.
        // The perceptron will continue learning as long as
        // it keeps seeing feature arrays of the same length.
        // When it sees a new data shape, it initializes.
        if (features.length !== weights.length) {
            // Copy the features rather than aliasing them:
            // weight updates below modify `weights` in place,
            // and must not mutate the caller's array.
            weights = features.slice();
            bias = 1;
        }
        // Make a prediction based on current weights.
        var prediction = perceptron_model.predict(features);
        // Update the weights if the prediction is wrong.
        if (prediction !== label) {
            // The gradient is +1 for a missed positive example
            // and -1 for a missed negative one.
            var gradient = label - prediction;
            for (var i = 0; i < weights.length; i++) {
                weights[i] += gradient * features[i];
            }
            bias += gradient;
        }
        return perceptron_model;
    };

    // Conveniently access the weights array.
    perceptron_model.weights = function() {
        return weights;
    };

    // Conveniently access the bias.
    perceptron_model.bias = function() {
        return bias;
    };

    // Return the completed model.
    return perceptron_model;
}


// # sum
//
// is simply the result of adding all numbers
Expand Down Expand Up @@ -1158,6 +1235,10 @@
}
}

// We use `ε`, epsilon, as a stopping criterion when we want to iterate
// until we're "close enough"; it is also used to clamp probabilities
// strictly inside (0, 1) — see `probit` below.
var epsilon = 0.0001;

// # [Probit](http://en.wikipedia.org/wiki/Probit)
//
// This is the inverse of cumulative_std_normal_probability(),
Expand All @@ -1168,7 +1249,7 @@
// So, for example, probit(0.5 + 0.6827/2) ≈ 1 because 68.27% of values are
// normally found within 1 standard deviation above or below the mean.
function probit(p) {
if (p == 0) {
if (p === 0) {
p = epsilon;
} else if (p >= 1) {
p = 1 - epsilon;
Expand Down Expand Up @@ -1226,10 +1307,6 @@
return (x - mean) / standard_deviation;
}

// We use `ε`, epsilon, as a stopping criterion when we want to iterate
// until we're "close enough".
var epsilon = 0.0001;

// # [Factorial](https://en.wikipedia.org/wiki/Factorial)
//
// A factorial, usually written n!, is the product of all positive
Expand Down Expand Up @@ -1590,6 +1667,8 @@

ss.bayesian = bayesian;

ss.perceptron = perceptron;

// Distribution-related methods
ss.epsilon = epsilon; // We make ε available to the test suite.
ss.factorial = factorial;
Expand Down
47 changes: 47 additions & 0 deletions test/perceptron.test.js
@@ -0,0 +1,47 @@
var ss = require('../');
var test = require('tape');

// Tests for the single-layer perceptron classifier.
// Subtests are registered with `t.test` (tape's documented subtest API)
// so they run as children of the parent test, which ends automatically
// once every subtest has finished — no explicit parent `t.end()` needed.
test('perceptron', function(t) {
    t.test('initializes to zeros if label is zero', function(st) {
        var p = ss.perceptron();
        p.train([1, 2, 3], 0);
        st.deepEqual(p.weights(), [0, 0, 0]);
        st.equal(p.bias(), 0);
        st.end();
    });

    t.test('initializes to values if label is one', function(st) {
        var p = ss.perceptron();
        p.train([1, 2, 3], 1);
        st.deepEqual(p.weights(), [1, 2, 3]);
        st.equal(p.bias(), 1);
        st.end();
    });

    t.test('learns to separate one from two', function(st) {
        var p = ss.perceptron();
        for (var i = 0; i < 4; i++) {
            p.train([1], 0);
            p.train([2], 1);
        }
        st.equal(p.predict([1]), 0);
        st.equal(p.predict([2]), 1);
        st.end();
    });

    t.test('learns a diagonal boundary', function(st) {
        var p = ss.perceptron();
        for (var i = 0; i < 5; i++) {
            p.train([1, 1], 1);
            p.train([0, 1], 0);
            p.train([1, 0], 0);
            p.train([0, 0], 0);
        }
        st.equal(p.predict([0, 0]), 0);
        st.equal(p.predict([0, 1]), 0);
        st.equal(p.predict([1, 0]), 0);
        st.equal(p.predict([1, 1]), 1);
        st.end();
    });
});

0 comments on commit fcd9e5c

Please sign in to comment.