Add wincompat.h to ensure some cpp files can be compiled painlessly.
niuzhiheng committed Jun 26, 2014
1 parent a879606 commit abfee75
Showing 6 changed files with 19 additions and 7 deletions.
3 changes: 1 addition & 2 deletions README.md
@@ -28,9 +28,8 @@ Currently it can be built by VS2012 for the x64 platform only. This is because the d
- Double-click `train_lenet.bat` to see the training progress.

#### Known Issues
I have trained on ImageNet with this Windows port as well. Some issues found are:
- It takes noticeably longer when you compile for the first time. Therefore, please refrain from using `clean & rebuild`.
- The speed is much slower than the build on Ubuntu. 20 iterations take 79s on Windows, whereas the same number of iterations takes about 30s on Ubuntu (on a GTX Titan).
- I have trained on ImageNet with this Windows port as well. The speed is much slower than the build on Ubuntu. 20 iterations take 79s on Windows, whereas the same number of iterations takes about 30s on Ubuntu (on a GTX Titan).

#### Bug Report
- Please create a new issue on [GitHub](https://github.com/niuzhiheng/caffe/issues) if you find any bugs.
10 changes: 8 additions & 2 deletions examples/MainCaller.cpp
@@ -1,4 +1,10 @@
#include "../../tools/train_net.cpp"
//#include "../../tools/net_speed_benchmark.cpp"
//#include "../../tools/finetune_net.cpp"
//#include "../../tools/test_net.cpp"
//#include "../../tools/finetune_net.cpp"
//#include "../../tools/net_speed_benchmark.cpp"
//#include "../../tools/dump_network.cpp"

//#include "../../tools/convert_imageset.cpp"
//#include "../../tools/extract_features.cpp"
//#include "../../tools/convert_imageset.cpp"
//#include "../../tools/compute_image_mean.cpp"
1 change: 1 addition & 0 deletions include/caffe/common.hpp
@@ -13,6 +13,7 @@
#include <curand.h>
#include <driver_types.h> // cuda driver types
#include <glog/logging.h>
#include <wincompat.h>

// Disable the copy and assignment operator for a class.
#define DISABLE_COPY_AND_ASSIGN(classname) \
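For context, the macro above is typically used at the bottom of a class definition; assuming it follows the usual idiom of declaring a private copy constructor and assignment operator without defining them (its body is truncated in this hunk), a hypothetical use looks like this (the class name is illustrative, not from this diff):

```cpp
class SyncedMemory {  // hypothetical class, for illustration only
 public:
  SyncedMemory() {}
  // ... data members and methods ...

  DISABLE_COPY_AND_ASSIGN(SyncedMemory);  // copying this class fails to compile
};
```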
7 changes: 7 additions & 0 deletions include/wincompat.h
@@ -0,0 +1,7 @@
#if !defined _HEADER_WIN_COMPAT_20140627_INCLUDED_
#define _HEADER_WIN_COMPAT_20140627_INCLUDED_

typedef unsigned int uint;
#define snprintf _snprintf

#endif //_HEADER_WIN_COMPAT_20140627_INCLUDED_
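The two definitions above cover the gaps that broke compilation under VS2012: MSVC has no `uint` typedef and provides `_snprintf` instead of C99 `snprintf`. A minimal sketch of code that compiles under VS2012 once this header is on the include path (the snippet is hypothetical, not part of the commit):

```cpp
#include <cstdio>
#include <wincompat.h>  // on MSVC: defines uint and maps snprintf to _snprintf

int main() {
  uint iterations = 20;  // 'uint' is not a built-in type on MSVC
  char buf[64];
  // Expands to _snprintf under VS2012; unlike C99 snprintf, _snprintf does
  // not guarantee null termination when the output is truncated.
  snprintf(buf, sizeof(buf), "iterations: %u", iterations);
  std::printf("%s\n", buf);
  return 0;
}
```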
4 changes: 2 additions & 2 deletions tools/extract_features.cpp
@@ -87,7 +87,7 @@ int feature_extraction_pipeline(int argc, char** argv) {
}
*/
string feature_extraction_proto(argv[++arg_pos]);
shared_ptr<Net<Dtype> > feature_extraction_net(
boost::shared_ptr<Net<Dtype> > feature_extraction_net(
new Net<Dtype>(feature_extraction_proto));
feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto);

@@ -121,7 +121,7 @@ int feature_extraction_pipeline(int argc, char** argv) {
int image_index = 0;
for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) {
feature_extraction_net->Forward(input_vec);
const shared_ptr<Blob<Dtype> > feature_blob = feature_extraction_net
const boost::shared_ptr<Blob<Dtype> > feature_blob = feature_extraction_net
->blob_by_name(extract_feature_blob_name);
int num_features = feature_blob->num();
int dim_features = feature_blob->count() / num_features;
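The change in this file qualifies `shared_ptr` explicitly because the unqualified name can become ambiguous once both the Boost and C++11 standard library versions are visible to name lookup, which is easy to trigger with VS2012 headers; writing `boost::shared_ptr` sidesteps the lookup entirely. A simplified illustration of the ambiguity (hypothetical snippet, not how Caffe's headers are actually arranged):

```cpp
#include <memory>                // std::shared_ptr (shipped with VS2012)
#include <boost/shared_ptr.hpp>  // boost::shared_ptr

using namespace std;
using namespace boost;

int main() {
  // shared_ptr<int> p(new int(1));      // error: ambiguous between std:: and boost::
  boost::shared_ptr<int> p(new int(1));  // the qualified name is never ambiguous
  return 0;
}
```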
1 change: 0 additions & 1 deletion tools/net_speed_benchmark.cpp
@@ -19,7 +19,6 @@
#include "caffe/solver.hpp"

using namespace caffe; // NOLINT(build/namespaces)
typedef unsigned int uint;

int main(int argc, char** argv) {
int total_iter = 50;
