Full set of conv nets in eblearn
James Bergstra committed Apr 1, 2010
1 parent c8384f9 commit 13082f7
Showing 6 changed files with 258 additions and 10 deletions.
43 changes: 40 additions & 3 deletions eblearn/Makefile
@@ -1,6 +1,43 @@

mnist_example.x : mnist_example.cc
	g++ -I/usr/include/eblearn -o mnist_example.x mnist_example.cc -leblearn
all: mnist_example_ipp.x mnist_example_noipp.x convnet_noipp.x convnet_ipp.x convnet96_ipp.x convnet96_noipp.x convnet256_ipp.x convnet256_noipp.x

all: mnist_example.x
clean:
	rm *.x

mnist_example_ipp.x : mnist_example.cc
	g++ -I${PUB_PREFIX}/eblearn_ipp -o mnist_example_ipp.x mnist_example.cc\
	-L/u/bergstrj/pub/intel/ipp/6.1.2.051/em64t/sharedlib\
	-L${PUB_PREFIX}/eblearn_ipp -leblearn -lippiem64t -pthread


mnist_example_noipp.x : mnist_example.cc
	g++ -O2 -I${PUB_PREFIX}/eblearn_noipp -o mnist_example_noipp.x mnist_example.cc\
	-L${PUB_PREFIX}/eblearn_noipp -leblearn

convnet_noipp.x : convnet.cc
	g++ -O2 -I${PUB_PREFIX}/eblearn_noipp -o convnet_noipp.x convnet.cc\
	-L${PUB_PREFIX}/eblearn_noipp -leblearn

convnet96_noipp.x : convnet96.cc
	g++ -O2 -I${PUB_PREFIX}/eblearn_noipp -o convnet96_noipp.x convnet96.cc\
	-L${PUB_PREFIX}/eblearn_noipp -leblearn

convnet256_noipp.x : convnet256.cc
	g++ -O2 -I${PUB_PREFIX}/eblearn_noipp -o convnet256_noipp.x convnet256.cc\
	-L${PUB_PREFIX}/eblearn_noipp -leblearn

convnet_ipp.x : convnet.cc
	g++ -DUSED_IPP -O2 -I${PUB_PREFIX}/eblearn_ipp -o convnet_ipp.x convnet.cc\
	-L/u/bergstrj/pub/intel/ipp/6.1.2.051/em64t/sharedlib\
	-L${PUB_PREFIX}/eblearn_ipp -leblearn -lippiem64t -pthread

convnet96_ipp.x : convnet96.cc
	g++ -DUSED_IPP -O2 -I${PUB_PREFIX}/eblearn_ipp -o convnet96_ipp.x convnet96.cc\
	-L/u/bergstrj/pub/intel/ipp/6.1.2.051/em64t/sharedlib\
	-L${PUB_PREFIX}/eblearn_ipp -leblearn -lippiem64t -pthread

convnet256_ipp.x : convnet256.cc
	g++ -DUSED_IPP -O2 -I${PUB_PREFIX}/eblearn_ipp -o convnet256_ipp.x convnet256.cc\
	-L/u/bergstrj/pub/intel/ipp/6.1.2.051/em64t/sharedlib\
	-L${PUB_PREFIX}/eblearn_ipp -leblearn -lippiem64t -pthread

65 changes: 65 additions & 0 deletions eblearn/convnet.cc
@@ -1,2 +1,67 @@
#include "libeblearn.h"
#include <time.h>
#include <sys/time.h>

using namespace std;
using namespace ebl; // all eblearn objects are under the ebl namespace

static double time_time() // a time function like time.time()
{
  struct timeval tv;
  gettimeofday(&tv, 0);
  return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}

typedef double t_net;

int main(int argc, char **argv) { // regular main without gui
  init_drand(92394); // initialize random seed

  intg n_examples = 1000; // maximum training set size: 60000
  idxdim dims(1,32,32); // get order and dimensions of sample

  //! create 1-of-n targets with target 1.0 for shown class, -1.0 for the rest
  idx<t_net> targets = create_target_matrix(10, 1.0);
  idx<t_net> inputs(n_examples, 32, 32);

  parameter<t_net> theparam(60000); // create trainable parameter
  lenet5<t_net> l5(theparam, 32, 32, 5, 5, 2, 2, 5, 5, 2, 2, 120, 10);
  // TODO: use an all-to-all connection table in second layer convolution
  // Because that's what the other packages implement.
  supervised_euclidean_machine<t_net, ubyte> thenet(
      (module_1_1<t_net>&)l5,
      targets,
      dims);
  supervised_trainer<t_net, ubyte, ubyte> thetrainer(thenet, theparam);
  classifier_meter trainmeter, testmeter;
  forget_param_linear fgp(1, 0.5);
  thenet.forget(fgp);

  // learning parameters
  gd_param gdp(/* double leta*/ 0.0001,
               /* double ln */ 0.0,
               /* double l1 */ 0.0,
               /* double l2 */ 0.0,
               /* int dtime */ 0,
               /* double iner */ 0.0,
               /* double a_v */ 0.0,
               /* double a_t */ 0.0,
               /* double g_t*/ 0.0);
  infer_param infp;

  state_idx<t_net> dummy_input(1, 32, 32);
  int J = 2000;
  double t = time_time();
  for (intg j = 0; j < J; ++j)
  {
    thetrainer.learn_sample(dummy_input, j%10, gdp);
    // TODO: iterate over mock dataset to simulate more realistic
    // memaccess pattern
  }
#ifdef USED_IPP
  cout << "ConvSmall\teblearn{ipp}\t" << J / (time_time() - t) << endl;
#else
  cout << "ConvSmall\teblearn\t" << J / (time_time() - t) << endl;
#endif
  return 0;
}
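Note on the TODO above: the second-layer convolution in lenet5 does not use an all-to-all connection table, whereas the other benchmarked packages connect every input feature map to every output map. Purely as an illustration (not part of this commit), the sketch below builds such an all-to-all table as an (nin*nout)-by-2 idx of (input map, output map) pairs, which is the layout eblearn's table-driven convolution layers are assumed to take; idx<intg>::set(value, i0, i1) is assumed to behave as in libidx, and the layer constructor that would actually consume the table is deliberately left out since it is not shown in this commit.

// Hypothetical helper, not part of this commit: build an all-to-all
// (fully connected) table between nin input maps and nout output maps.
// Each row of the (nin*nout, 2) idx is one (input map, output map) pair.
// Assumes idx<intg>::set(value, i0, i1) as provided by libidx.
#include "libeblearn.h"

using namespace ebl;

idx<intg> make_full_table(intg nin, intg nout) {
  idx<intg> table(nin * nout, 2);
  intg row = 0;
  for (intg i = 0; i < nin; ++i) {
    for (intg o = 0; o < nout; ++o) {
      table.set(i, row, 0);  // column 0: source (input) feature map
      table.set(o, row, 1);  // column 1: destination (output) feature map
      ++row;
    }
  }
  return table;  // e.g. make_full_table(6, 16) for a LeNet-5-sized C3 layer
}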
66 changes: 66 additions & 0 deletions eblearn/convnet256.cc
@@ -0,0 +1,66 @@
#include "libeblearn.h"
#include <time.h>
#include <sys/time.h>

using namespace std;
using namespace ebl; // all eblearn objects are under the ebl namespace

static double time_time() // a time function like time.time()
{
  struct timeval tv;
  gettimeofday(&tv, 0);
  return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}

typedef double t_net;

int main(int argc, char **argv) { // regular main without gui
  init_drand(92394); // initialize random seed

  intg n_examples = 20; // maximum training set size: 60000
  idxdim dims(1,256,256); // get order and dimensions of sample

  //! create 1-of-n targets with target 1.0 for shown class, -1.0 for the rest
  idx<t_net> targets = create_target_matrix(10, 1.0);
  idx<t_net> inputs(n_examples, 256, 256);

  parameter<t_net> theparam(6000); // create trainable parameter
  lenet5<t_net> l5(theparam, 256, 256, 7, 7, 5, 5, 7, 7, 4, 4, 120, 10);
  // TODO: use an all-to-all connection table in second layer convolution
  // Because that's what the other packages implement.
  supervised_euclidean_machine<t_net, ubyte> thenet(
      (module_1_1<t_net>&)l5,
      targets,
      dims);
  supervised_trainer<t_net, ubyte, ubyte> thetrainer(thenet, theparam);
  classifier_meter trainmeter, testmeter;
  forget_param_linear fgp(1, 0.5);
  thenet.forget(fgp);

  // learning parameters
  gd_param gdp(/* double leta*/ 0.0001,
               /* double ln */ 0.0,
               /* double l1 */ 0.0,
               /* double l2 */ 0.0,
               /* int dtime */ 0,
               /* double iner */ 0.0,
               /* double a_v */ 0.0,
               /* double a_t */ 0.0,
               /* double g_t*/ 0.0);
  infer_param infp;

  state_idx<t_net> dummy_input(1, 256, 256);
  double t = time_time();
  for (intg j = 0; j < n_examples; ++j)
  {
    thetrainer.learn_sample(dummy_input, j%10, gdp);
    // TODO: iterate over mock dataset to simulate more realistic
    // memaccess pattern
  }
#ifdef USED_IPP
  cout << "ConvLarge\teblearn{ipp}\t" << n_examples / (time_time() - t) << endl;
#else
  cout << "ConvLarge\teblearn\t" << n_examples / (time_time() - t) << endl;
#endif
  return 0;
}
66 changes: 66 additions & 0 deletions eblearn/convnet96.cc
@@ -0,0 +1,66 @@
#include "libeblearn.h"
#include <time.h>
#include <sys/time.h>

using namespace std;
using namespace ebl; // all eblearn objects are under the ebl namespace

static double time_time() // a time function like time.time()
{
  struct timeval tv;
  gettimeofday(&tv, 0);
  return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}

typedef double t_net;

int main(int argc, char **argv) { // regular main without gui
  init_drand(92394); // initialize random seed

  intg n_examples = 100; // maximum training set size: 60000
  idxdim dims(1,96,96); // get order and dimensions of sample

  //! create 1-of-n targets with target 1.0 for shown class, -1.0 for the rest
  idx<t_net> targets = create_target_matrix(10, 1.0);
  idx<t_net> inputs(n_examples, 96, 96);

  parameter<t_net> theparam(6000); // create trainable parameter
  lenet5<t_net> l5(theparam, 96, 96, 7, 7, 3, 3, 7, 7, 3, 3, 120, 10);
  // TODO: use an all-to-all connection table in second layer convolution
  // Because that's what the other packages implement.
  supervised_euclidean_machine<t_net, ubyte> thenet(
      (module_1_1<t_net>&)l5,
      targets,
      dims);
  supervised_trainer<t_net, ubyte, ubyte> thetrainer(thenet, theparam);
  classifier_meter trainmeter, testmeter;
  forget_param_linear fgp(1, 0.5);
  thenet.forget(fgp);

  // learning parameters
  gd_param gdp(/* double leta*/ 0.0001,
               /* double ln */ 0.0,
               /* double l1 */ 0.0,
               /* double l2 */ 0.0,
               /* int dtime */ 0,
               /* double iner */ 0.0,
               /* double a_v */ 0.0,
               /* double a_t */ 0.0,
               /* double g_t*/ 0.0);
  infer_param infp;

  state_idx<t_net> dummy_input(1, 96, 96);
  double t = time_time();
  for (intg j = 0; j < n_examples; ++j)
  {
    thetrainer.learn_sample(dummy_input, j%10, gdp);
    // TODO: iterate over mock dataset to simulate more realistic
    // memaccess pattern
  }
#ifdef USED_IPP
  cout << "ConvMed\teblearn{ipp}\t" << n_examples / (time_time() - t) << endl;
#else
  cout << "ConvMed\teblearn\t" << n_examples / (time_time() - t) << endl;
#endif
  return 0;
}
16 changes: 9 additions & 7 deletions eblearn/mnist_example.cc
@@ -2,10 +2,6 @@
#include <time.h>
#include <sys/time.h>

#ifdef __GUI__
#include "libeblearngui.h"
#endif

using namespace std;
using namespace ebl; // all eblearn objects are under the ebl namespace

@@ -90,11 +86,13 @@ int main(int argc, char **argv) { // regular main without gui
  thetrainer.init(train_ds, &trainmeter);
  // training on lowest size common to all classes (times # classes)
  // now do training iterations
  cerr << "... Training network from " << train_ds.get_lowest_common_size() << endl;
  //cerr << "... Training network from " << train_ds.get_lowest_common_size() << endl;
  double t = time_time();
  train_ds.fprop(*thetrainer.input, thetrainer.label);
  lab = thetrainer.label.get();
  for (intg j = 0; j < train_ds.get_lowest_common_size(); ++j) {
  //int J = train_ds.get_lowest_common_size();
  int J = 2000;
  for (intg j = 0; j < J; ++j) {
    //train_ds.fprop(*thetrainer.input, thetrainer.label);
    //lab = thetrainer.label.get();
    thetrainer.learn_sample(*thetrainer.input, lab, gdp);
@@ -103,7 +101,11 @@ int main(int argc, char **argv) { // regular main without gui
    // log.update(age, output, label.get(), energy);
    //train_ds.next_train();
  }
  cerr << "... Iteration took" << t - time_time() << "seconds" << endl;
#ifdef __IPP__
  cout << "lenet5\teblearn{ipp}\t" << J / (time_time() - t) << endl;
#else
  cout << "lenet5\teblearn\t" << J / (time_time() - t) << endl;
#endif
  return 0;
}

12 changes: 12 additions & 0 deletions eblearn/run.sh
@@ -0,0 +1,12 @@
#!/bin/sh

# LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_ipp:$LD_LIBRARY_PATH ./mnist_example_ipp.x /data/lisa/data/mnist
# LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_noipp:$LD_LIBRARY_PATH ./mnist_example_noipp.x /data/lisa/data/mnist

LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_ipp:$LD_LIBRARY_PATH ./convnet_ipp.x > ${HOSTNAME}_eblearn_convnet_ipp.bmark
LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_ipp:$LD_LIBRARY_PATH ./convnet96_ipp.x > ${HOSTNAME}_eblearn_convnet96_ipp.bmark
LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_ipp:$LD_LIBRARY_PATH ./convnet256_ipp.x > ${HOSTNAME}_eblearn_convnet256_ipp.bmark

LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_noipp:$LD_LIBRARY_PATH ./convnet_noipp.x > ${HOSTNAME}_eblearn_convnet.bmark
LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_noipp:$LD_LIBRARY_PATH ./convnet96_noipp.x > ${HOSTNAME}_eblearn_convnet96.bmark
LD_LIBRARY_PATH=$PUB_PREFIX/eblearn_noipp:$LD_LIBRARY_PATH ./convnet256_noipp.x > ${HOSTNAME}_eblearn_convnet256.bmark
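
Each .bmark file written by run.sh holds one tab-separated line per run: problem name, implementation label, and examples per second, matching the cout statements in the benchmark programs above. The following stand-alone C++ sketch (illustrative only, not part of this commit; the report.x name is hypothetical) merges those files into a single human-readable report.

// Illustrative only: merge the tab-separated .bmark lines produced by run.sh.
// Hypothetical usage: ./report.x *.bmark
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char **argv) {
  for (int i = 1; i < argc; ++i) {          // each argument is a .bmark file
    std::ifstream in(argv[i]);
    std::string line;
    while (std::getline(in, line)) {
      std::istringstream fields(line);
      std::string problem, impl, speed;
      if (std::getline(fields, problem, '\t') &&
          std::getline(fields, impl, '\t') &&
          std::getline(fields, speed)) {
        std::cout << problem << "  " << impl << "  "
                  << speed << " examples/sec" << std::endl;
      }
    }
  }
  return 0;
}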
