The model data has been separated from the estimation algorithm
nok committed Oct 14, 2017
1 parent 0e7c23c commit 635da46
Showing 22 changed files with 508 additions and 538 deletions.
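For orientation before the file-by-file diff: the basics examples listed below exercise the whole pipeline, from fitting an MLPClassifier to printing the transpiled source. A minimal sketch of that flow follows; the Porter constructor and export() call are assumptions based on the project's README of this period, not part of this commit.

from sklearn.datasets import load_iris
from sklearn.neural_network import MLPClassifier
from sklearn_porter import Porter  # assumed entry point, per the README of this era

# Fit a small network on iris.
X, y = load_iris(return_X_y=True)
clf = MLPClassifier(hidden_layer_sizes=(50,), activation='relu', max_iter=500)
clf.fit(X, y)

# Transpile it. After this commit, the generated source carries the model
# data (layers, weights, bias) separately from the predict logic and wires
# them together through a constructor.
porter = Porter(clf, language='java')  # assumed signature
print(porter.export())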
64 changes: 42 additions & 22 deletions examples/estimator/classifier/MLPClassifier/java/basics.ipynb

Large diffs are not rendered by default.

62 changes: 41 additions & 21 deletions examples/estimator/classifier/MLPClassifier/java/basics.py

Large diffs are not rendered by default.

148 changes: 67 additions & 81 deletions examples/estimator/classifier/MLPClassifier/js/basics.ipynb

Large diffs are not rendered by default.

150 changes: 68 additions & 82 deletions examples/estimator/classifier/MLPClassifier/js/basics.py

Large diffs are not rendered by default.

100 changes: 43 additions & 57 deletions examples/estimator/regressor/MLPRegressor/js/basics.ipynb

Large diffs are not rendered by default.

99 changes: 43 additions & 56 deletions examples/estimator/regressor/MLPRegressor/js/basics.py

Large diffs are not rendered by default.

56 changes: 27 additions & 29 deletions sklearn_porter/estimator/classifier/MLPClassifier/__init__.py
@@ -150,14 +150,32 @@ def create_method(self):
         :return out : string
             The built method as string.
         """
+        method_type = 'method.{}'.format(self.prefix)
+        temp_method = self.temp(method_type, skipping=True, n_indents=1)
+        method = temp_method.format(class_name=self.class_name,
+                                    method_name=self.method_name,
+                                    n_features=self.n_inputs,
+                                    n_classes=self.n_outputs)
+        out = self.indent(method, n_indents=0, skipping=True)
+        return out
+
+    def create_class(self, method):
+        """
+        Build the estimator class.
+
+        Returns
+        -------
+        :return out : string
+            The built class as string.
+        """
 
         temp_arr = self.temp('arr')
         temp_arr__ = self.temp('arr[][]')
         temp_arr___ = self.temp('arr[][][]')
 
         # Activations:
         layers = list(self._get_activations())
-        layers = ', '.join(['atts'] + layers)
+        layers = ', '.join(layers)
         layers = temp_arr__.format(type='double', name='layers', values=layers)
 
         # Coefficients (weights):
@@ -171,45 +189,22 @@ def create_method(self):
             coefficients.append(temp_arr.format(layer_weights))
         coefficients = ', '.join(coefficients)
         coefficients = temp_arr___.format(type='double',
-                                          name='COEFFICIENTS',
+                                          name='weights',
                                           values=coefficients)
 
         # Intercepts (biases):
         intercepts = list(self._get_intercepts())
         intercepts = ', '.join(intercepts)
         intercepts = temp_arr__.format(type='double',
-                                       name='INTERCEPTS',
+                                       name='bias',
                                        values=intercepts)
 
-        method_type = 'method.{}'.format(self.prefix)
-        temp_method = self.temp(method_type, skipping=True, n_indents=1)
-        method = temp_method.format(class_name=self.class_name,
-                                    method_name=self.method_name,
-                                    n_features=self.n_inputs,
-                                    n_classes=self.n_outputs,
-                                    layers=layers,
-                                    coefficients=coefficients,
-                                    intercepts=intercepts)
-        n_indents = 1 if self.target_language in ['js'] else 0
-        out = self.indent(method, n_indents=n_indents, skipping=True)
-        return out
-
-    def create_class(self, method):
-        """
-        Build the estimator class.
-
-        Returns
-        -------
-        :return out : string
-            The built class as string.
-        """
         hidden_act_type = 'activation_fn.' + self.hidden_activation
-        n_indents = 1 if self.target_language in ['java'] else 2
         hidden_act = self.temp(hidden_act_type, skipping=True,
-                               n_indents=n_indents)
+                               n_indents=1)
        output_act_type = 'output_fn.' + self.output_activation
        output_act = self.temp(output_act_type, skipping=True,
-                               n_indents=n_indents)
+                               n_indents=1)
 
         temp_class = self.temp('class')
         file_name = '{}.js'.format(self.class_name.lower())
@@ -218,7 +213,10 @@ def create_class(self, method):
                                 n_features=self.n_inputs,
                                 activation_function=hidden_act,
                                 output_function=output_act,
-                                file_name=file_name)
+                                file_name=file_name,
+                                weights=coefficients,
+                                bias=intercepts,
+                                layers=layers)
         return out
 
     def _get_intercepts(self):
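To make the new template variables concrete: weights, bias, and layers map directly onto attributes of a fitted scikit-learn MLPClassifier. A sketch of that mapping (illustrative; _get_activations and _get_intercepts are only partially visible in this diff):

import numpy as np
from sklearn.neural_network import MLPClassifier

clf = MLPClassifier(hidden_layer_sizes=(15,), max_iter=500)
clf.fit(np.random.rand(30, 4), np.random.randint(0, 2, 30))

# weights: one matrix per layer transition, indexed [i][l][j] exactly as
# in the generated loops (l = source neuron, j = target neuron).
weights = [w.tolist() for w in clf.coefs_]

# bias: one vector per layer transition.
bias = [b.tolist() for b in clf.intercepts_]

# layers: zero-initialized activation buffers for every layer after the
# input; the generated predict method accumulates sums into these.
layers = [[0.0] * w.shape[1] for w in clf.coefs_]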
@@ -1,4 +1,4 @@
 // Activation function (identity):
-public static double[] compAct(double[] v) {
+private double[] compAct(double[] v) {
     return v;
 }
@@ -1,5 +1,5 @@
 // Activation function (logistic):
-public static double[] compAct(double[] v) {
+private double[] compAct(double[] v) {
     for (int i = 0, l = v.length; i < l; i++) {
         v[i] = 1. / (1. + Math.exp(-v[i]));
     }
@@ -1,5 +1,5 @@
 // Activation function (relu):
-public static double[] compAct(double[] v) {
+private double[] compAct(double[] v) {
     for (int i = 0, l = v.length; i < l; i++) {
         v[i] = Math.max(0, v[i]);
     }
@@ -1,5 +1,5 @@
 // Activation function (tanh):
-public static double[] compAct(double[] v) {
+private double[] compAct(double[] v) {
     for (int i = 0, l = v.length; i < l; i++) {
         v[i] = Math.tanh(v[i]);
     }
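The four compAct templates above track scikit-learn's activation parameter one-to-one ('identity', 'logistic', 'relu', 'tanh'). A pure-Python restatement of the same element-wise functions, for reference:

import math

# Mirrors the compAct templates, keyed by MLPClassifier's 'activation' value.
ACTIVATIONS = {
    'identity': lambda v: v,
    'logistic': lambda v: [1. / (1. + math.exp(-x)) for x in v],
    'relu':     lambda v: [max(0., x) for x in v],
    'tanh':     lambda v: [math.tanh(x) for x in v],
}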
@@ -1,5 +1,15 @@
 class {class_name} {{
 
+    private double[][] layers;
+    private double[][][] weights;
+    private double[][] bias;
+
+    public {class_name}(double[][] layers, double[][][] weights, double[][] bias) {{
+        this.layers = layers;
+        this.weights = weights;
+        this.bias = bias;
+    }}
+
     {activation_function}
 
     {output_function}
@@ -8,11 +18,21 @@ class {class_name} {{
 
     public static void main(String[] args) {{
         if (args.length == {n_features}) {{
-            double[] atts = new double[args.length];
+            // Features:
+            double[] features = new double[args.length];
             for (int i = 0, l = args.length; i < l; i++) {{
-                atts[i] = Double.parseDouble(args[i]);
+                features[i] = Double.parseDouble(args[i]);
             }}
-            System.out.println({class_name}.{method_name}(atts));
+
+            // Model data:
+            {layers}
+            {weights}
+            {bias}
+
+            // Prediction:
+            {class_name} brain = new {class_name}(layers, weights, bias);
+            int estimation = brain.{method_name}(features);
+            System.out.println(estimation);
         }}
     }}
 }}
@@ -1,24 +1,24 @@
-public static int {method_name}(double[] atts) {{
-    if (atts.length != {n_features}) {{ return -1; }}
+public int {method_name}(double[] neurons) {{
+    if (neurons.length != {n_features}) return -1;
 
-    {coefficients}
-    {intercepts}
+    double[][] network = new double[this.layers.length + 1][];
+    System.arraycopy(new double[][] {{neurons}}, 0, network, 0, 1);
+    System.arraycopy(this.layers, 0, network, 1, this.layers.length);
 
-    {layers}
-    for (int i = 0; i < layers.length - 1; i++) {{
-        for (int j = 0; j < layers[i + 1].length; j++) {{
-            for (int l = 0; l < layers[i].length; l++) {{
-                layers[i + 1][j] += layers[i][l] * COEFFICIENTS[i][l][j];
+    for (int i = 0; i < network.length - 1; i++) {{
+        for (int j = 0; j < network[i + 1].length; j++) {{
+            for (int l = 0; l < network[i].length; l++) {{
+                network[i + 1][j] += network[i][l] * this.weights[i][l][j];
             }}
-            layers[i + 1][j] += INTERCEPTS[i][j];
+            network[i + 1][j] += this.bias[i][j];
         }}
-        if ((i + 1) < (layers.length - 1)) {{
-            layers[i + 1] = {class_name}.compAct(layers[i + 1]);
+        if ((i + 1) < (network.length - 1)) {{
+            network[i + 1] = this.compAct(network[i + 1]);
         }}
     }}
-    layers[layers.length - 1] = {class_name}.compOut(layers[layers.length - 1]);
+    network[network.length - 1] = this.compOut(network[network.length - 1]);
 
-    if (layers[layers.length - 1][0] > .5) {{
+    if (network[network.length - 1][0] > .5) {{
         return 1;
     }}
     return 0;
@@ -1,28 +1,28 @@
-public static int {method_name}(double[] atts) {{
-    if (atts.length != {n_features}) {{ return -1; }}
+public int {method_name}(double[] neurons) {{
+    if (neurons.length != {n_features}) return -1;
 
-    {coefficients}
-    {intercepts}
+    double[][] network = new double[this.layers.length + 1][];
+    System.arraycopy(new double[][] {{neurons}}, 0, network, 0, 1);
+    System.arraycopy(this.layers, 0, network, 1, this.layers.length);
 
-    {layers}
-    for (int i = 0; i < layers.length - 1; i++) {{
-        for (int j = 0; j < layers[i + 1].length; j++) {{
-            for (int l = 0; l < layers[i].length; l++) {{
-                layers[i + 1][j] += layers[i][l] * COEFFICIENTS[i][l][j];
+    for (int i = 0; i < network.length - 1; i++) {{
+        for (int j = 0; j < network[i + 1].length; j++) {{
+            for (int l = 0; l < network[i].length; l++) {{
+                network[i + 1][j] += network[i][l] * this.weights[i][l][j];
             }}
-            layers[i + 1][j] += INTERCEPTS[i][j];
+            network[i + 1][j] += this.bias[i][j];
         }}
-        if ((i + 1) < (layers.length - 1)) {{
-            layers[i + 1] = {class_name}.compAct(layers[i + 1]);
+        if ((i + 1) < (network.length - 1)) {{
+            network[i + 1] = this.compAct(network[i + 1]);
         }}
     }}
-    layers[layers.length - 1] = {class_name}.compOut(layers[layers.length - 1]);
+    network[network.length - 1] = this.compOut(network[network.length - 1]);
 
     int classIdx = -1;
     double classVal = Double.NEGATIVE_INFINITY;
-    for (int i = 0, l = layers[layers.length - 1].length; i < l; i++) {{
-        if (layers[layers.length - 1][i] > classVal) {{
-            classVal = layers[layers.length - 1][i];
+    for (int i = 0, l = network[network.length - 1].length; i < l; i++) {{
+        if (network[network.length - 1][i] > classVal) {{
+            classVal = network[network.length - 1][i];
             classIdx = i;
         }}
     }}
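The two predict templates above share the same propagation loop and differ only in how the output layer is read: a 0.5 threshold for the binary case, an argmax otherwise. A pure-Python mirror of that logic, illustrative rather than part of the diff:

def predict(features, layers, weights, bias, comp_act, comp_out):
    # network[0] is the feature vector; the rest are fresh zero buffers,
    # copied so repeated calls do not reuse stale sums.
    network = [list(features)] + [list(layer) for layer in layers]
    for i in range(len(network) - 1):
        for j in range(len(network[i + 1])):
            for l in range(len(network[i])):
                network[i + 1][j] += network[i][l] * weights[i][l][j]
            network[i + 1][j] += bias[i][j]
        if (i + 1) < (len(network) - 1):
            network[i + 1] = comp_act(network[i + 1])
    network[-1] = comp_out(network[-1])

    out = network[-1]
    if len(out) == 1:  # binary template: single output neuron
        return 1 if out[0] > .5 else 0
    return max(range(len(out)), key=out.__getitem__)  # multi-class argmax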
@@ -1,5 +1,5 @@
 // Output function (logistic):
-public static double[] compOut(double[] v) {
+private double[] compOut(double[] v) {
     for (int i = 0, l = v.length; i < l; i++) {
         v[i] = 1. / (1. + Math.exp(-v[i]));
     }
@@ -1,5 +1,5 @@
 // Output function (softmax):
-public static double[] compOut(double[] v) {
+private double[] compOut(double[] v) {
     double max = Double.NEGATIVE_INFINITY;
     for (double x : v) {
         if (x > max) {
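The visible lines of the softmax template compute the running maximum first, the standard guard against overflow in exp(); the rest of the file sits below the fold and presumably exponentiates and normalizes. In Python the full function would read (an assumption about the hidden lines, not a quote from them):

import math

def comp_out(v):
    m = max(v)  # subtract the max so large logits cannot overflow exp()
    exps = [math.exp(x - m) for x in v]
    s = sum(exps)
    return [e / s for e in exps]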
@@ -1,50 +1,33 @@
 // Array.prototype.fill polyfill:
 [].fill||(Array.prototype.fill=function(a){{for(var b=Object(this),c=parseInt(b.length,10),d=arguments[1],e=parseInt(d,10)||0,f=0>e?Math.max(c+e,0):Math.min(e,c),g=arguments[2],h=void 0===g?c:parseInt(g)||0,i=0>h?Math.max(c+h,0):Math.min(h,c);i>f;f++)b[f]=a;return b}});
 
-var {class_name} = (function() {{
-    var instance;
+var {class_name} = function(layers, weights, bias) {{
 
-    function init() {{
+    {activation_function}
 
-        {activation_function}
+    {output_function}
 
-        {output_function}
+    {method}
 
-        {method}
+}};
 
-    }};
-
-    return {{
-        getInstance: function() {{
-            if (!instance) {{
-                instance = init();
-            }}
-            return instance;
-        }}
-    }};
+// Model data:
+const {layers}
+const {weights}
+const {bias}
 
-}})();
+// Estimator:
+var brain = new {class_name}(layers, weights, bias);
 
 if (typeof process !== 'undefined' && typeof process.argv !== 'undefined') {{
     if (process.argv.length - 2 === {n_features}) {{
         const features = process.argv.slice(2);
-        var brain = {class_name}.getInstance();
         var prediction = brain.{method_name}(features);
         console.log(prediction);
     }}
 }}
 
-// Web Worker:
-// File: {file_name}
-var onmessage = function(e) {{
-    if (e.data.length === {n_features}) {{
-        const features = e.data;
-        var brain = {class_name}.getInstance();
-        var prediction = brain.{method_name}(features);
-        postMessage(prediction);
-    }}
-}};
-// File: main.js
+// Web Worker (main.js):
 // if (typeof window !== 'undefined' && window.Worker) {{
 //     var worker = new Worker('{file_name}');
 //     worker.onmessage = function(e) {{
@@ -53,3 +36,12 @@ var onmessage = function(e) {{
 //     worker.postMessage([/* feature vector */]);
 //     worker.postMessage([/* feature vector */]);
 // }}
+
+// Web Worker ({file_name}):
+var onmessage = function(e) {{
+    if (e.data.length === {n_features}) {{
+        const features = e.data;
+        var prediction = brain.{method_name}(features);
+        postMessage(prediction);
+    }}
+}};
@@ -1,28 +1,22 @@
-// Model data:
-const {coefficients}
-const {intercepts}
+this.{method_name} = function(neurons) {{
+    if (neurons.length != {n_features}) return -1;
+    var network = [neurons].concat(layers);
 
-return {{
-    {method_name}: function(atts) {{
-        if (atts.length != {n_features}) {{ return -1; }};
-        var {layers}
-
-        for (var i = 0; i < layers.length - 1; i++) {{
-            for (var j = 0; j < layers[i + 1].length; j++) {{
-                for (var l = 0; l < layers[i].length; l++) {{
-                    layers[i + 1][j] += layers[i][l] * COEFFICIENTS[i][l][j];
-                }}
-                layers[i + 1][j] += INTERCEPTS[i][j];
-            }}
-            if ((i + 1) < (layers.length - 1)) {{
-                layers[i + 1] = compAct(layers[i + 1]);
-            }}
-        }}
-        layers[layers.length - 1] = compOut(layers[layers.length - 1]);
-
-        if (layers[layers.length - 1][0] > .5) {{
-            return 1;
-        }}
-        return 0;
-    }}
-}};
+    for (var i = 0; i < network.length - 1; i++) {{
+        for (var j = 0; j < network[i + 1].length; j++) {{
+            for (var l = 0; l < network[i].length; l++) {{
+                network[i + 1][j] += network[i][l] * weights[i][l][j];
+            }}
+            network[i + 1][j] += bias[i][j];
+        }}
+        if ((i + 1) < (network.length - 1)) {{
+            network[i + 1] = compAct(network[i + 1]);
+        }}
+    }}
+    network[network.length - 1] = compOut(network[network.length - 1]);
+
+    if (network[network.length - 1][0] > .5) {{
+        return 1;
+    }}
+    return 0;
+}};
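A quick way to sanity-check a port built from these templates is to run the pure-Python mirror from the sketches above against scikit-learn's own predictions (illustrative; predict, ACTIVATIONS, and comp_out refer to those earlier sketches):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.neural_network import MLPClassifier

X, y = load_iris(return_X_y=True)
clf = MLPClassifier(hidden_layer_sizes=(15,), max_iter=2000).fit(X, y)

weights = [w.tolist() for w in clf.coefs_]
bias = [b.tolist() for b in clf.intercepts_]
layers = [[0.0] * w.shape[1] for w in clf.coefs_]

ported = [predict(x, layers, weights, bias,
                  ACTIVATIONS[clf.activation], comp_out)
          for x in X]
# Expect True, barring float round-off flipping a near-tied argmax.
print((np.asarray(ported) == clf.predict(X)).all())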