Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adapt to python-protobuf >= 3.2.0 #105

wants to merge 8 commits into from
Show file tree
Hide file tree
Changes from all commits
File filter

Filter by extension

Filter by extension

Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
@@ -8,3 +8,8 @@
# Python cache

# IDEA project files.

# Generated files.
@@ -29,10 +29,18 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, phase
if caffemodel_path is not None:
data = transformer.transform_data()
print_stderr('Saving data...')
# create directory if not existing
dirname = os.path.dirname(data_output_path)
if not os.path.exists(dirname):
with open(data_output_path, 'wb') as data_out:, data)
if code_output_path:
print_stderr('Saving source...')
# create directory if not existing
dirname = os.path.dirname(code_output_path)
if not os.path.exists(dirname):
with open(code_output_path, 'wb') as src_out:
@@ -0,0 +1,11 @@


@@ -0,0 +1,17 @@
# Example of Saving Model

This is an example of how to save the GraphDef and variables of a converted model
in the Tensorflow official form. By doing this, the converted model can be
conveniently applied with Tensorflow APIs in other languages.

For example, if a converted model is named "VGG", the generated code file should
be named as "VGG.py", and the class name inside should remain "CaffeNet".

The module "VGG" should be able to be directly imported. So put it inside the
[save_graphdef](save_graphdef) folder, or add it to "sys.path".

To save model variables, pass the path of the converted data file (e.g. VGG.npy)
to the parameter "--data-input-path".

A "VGG_frozen.pb" is also generated with all variables converted into constants
in the saved graph.
Empty file.
@@ -0,0 +1,51 @@
#!/usr/bin/env python

import argparse
import os.path as osp
import sys

import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib

def save(name, data_input_path):
    """Export a converted Caffe model in standard TensorFlow form.

    Writes the GraphDef as both text (``<name>.pbtxt``) and binary
    (``<name>.pb``), and — when weight data is supplied — a checkpoint plus a
    frozen graph (``<name>_frozen.pb``) with all variables folded into
    constants.

    Args:
        name: Importable module name of the converted model (e.g. "VGG");
            the module must expose a ``CaffeNet`` class.
        data_input_path: Path to the converted ``.npy`` weight file, or
            ``None`` to skip saving variables and freezing the graph.
    """
    # NOTE(review): helper appears unused in this view — confirm before removal.
    def getpardir(path): return osp.split(path)[0]
    # Import the converted model's class.
    caffe_net_module = __import__(name)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # NOTE(review): input is hard-coded to one 227x227 RGB image
        # (AlexNet/CaffeNet-style) — confirm it matches the converted model.
        image_input = tf.placeholder(tf.float32, shape=[1, 227, 227, 3], name="data")
        net = caffe_net_module.CaffeNet({'data': image_input})

        # Save the graph definition: text form (.pbtxt) and binary form (.pb).
        pb_name = name + '.pb'
        tf.train.write_graph(sess.graph_def, '.', pb_name + 'txt', True)
        tf.train.write_graph(sess.graph_def, '.', pb_name, False)

        if data_input_path is not None:
            # Load the converted weights into the session's variables.
            net.load(data_input_path, sess)
            # Checkpoint the variable values so freeze_graph can read them.
            saver = saver_lib.Saver(tf.global_variables())
            checkpoint_prefix = osp.join(osp.curdir, name + '.ckpt')
            checkpoint_path = saver.save(sess, checkpoint_prefix)

            # Freeze the graph: replace variables with constants in a
            # single self-contained GraphDef.
            # NOTE(review): output node 'fc8/fc8' is model-specific — confirm
            # it exists in the converted network.
            freeze_graph.freeze_graph(pb_name, "",
                                      True, checkpoint_path, 'fc8/fc8',
                                      'save/restore_all', 'save/Const:0',
                                      name + '_frozen.pb', False, "")

def main():
    """Parse command-line arguments and export the named converted model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('name', help='Name of the converted model')
    parser.add_argument('--data-input-path', help='Converted data input path')
    args = parser.parse_args()
    # Scrape had dropped args.name here, making the call a syntax error.
    save(args.name, args.data_input_path)


if __name__ == '__main__':
    main()
File renamed without changes.
@@ -14,8 +14,8 @@ def import_caffe(self):
self.caffe = caffe
except ImportError:
# Fall back to the protobuf implementation
from . import caffepb
self.caffepb = caffepb
from . import caffe_pb2
self.caffepb = caffe_pb2
if self.caffe:
# Use the protobuf code from the imported distribution.
@@ -130,11 +130,11 @@ def conv(self,
output = convolve(input, kernel)
# Split the input into groups and then convolve each of them independently
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
input_groups = tf.split(input,group, 3)
kernel_groups = tf.split(kernel, group, 3)
output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
# Concatenate the groups
output = tf.concat(3, output_groups)
output = tf.concat(output_groups, 3)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
@@ -177,7 +177,7 @@ def lrn(self, input, radius, alpha, beta, name, bias=1.0):

def concat(self, inputs, axis, name):
    """Concatenate `inputs` along `axis` using the TF >= 1.0 argument order.

    The pre-1.0 `concat_dim=` call left over from the old diff side is
    removed; `tf.concat(values=..., axis=...)` is the current API.
    """
    return tf.concat(values=inputs, axis=axis, name=name)

def add(self, inputs, name):