Adapt to python-protobuf >= 3.2.0 #105

Open
wants to merge 8 commits into
base: master
from
@@ -8,3 +8,8 @@
# Python cache
*.pyc
# IDEA project files.
.idea/
# Generated files.
*.npy
@@ -29,10 +29,18 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, phase
         if caffemodel_path is not None:
             data = transformer.transform_data()
             print_stderr('Saving data...')
+            # create directory if not existing
+            dirname = os.path.dirname(data_output_path)
+            if not os.path.exists(dirname):
+                os.makedirs(dirname)
             with open(data_output_path, 'wb') as data_out:
                 np.save(data_out, data)
         if code_output_path:
             print_stderr('Saving source...')
+            # create directory if not existing
+            dirname = os.path.dirname(code_output_path)
+            if not os.path.exists(dirname):
+                os.makedirs(dirname)
             with open(code_output_path, 'wb') as src_out:
                 src_out.write(transformer.transform_source())
         print_stderr('Done.')
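A small caveat with the pattern added above: os.path.dirname returns an empty string when the output path is a bare filename, and os.makedirs('') raises an error. A guarded variant, as a minimal sketch (ensure_parent_dir is a hypothetical helper, not part of this PR):

import os

def ensure_parent_dir(path):
    # Hypothetical helper: create the parent directory of `path`, but only
    # if the path actually has a parent component and it does not exist yet.
    dirname = os.path.dirname(path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)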
@@ -0,0 +1,11 @@
checkpoint
*.index
*.meta
*.pb
*.pbtxt
*.data-*-of-*
*.py
!save_model.py
!__init__.py
@@ -0,0 +1,17 @@
# Example of Saving a Model

This is an example of how to save the GraphDef and variables of a converted model
in TensorFlow's standard format, so that the converted model can be used
conveniently through the TensorFlow APIs in other languages.

For example, if a converted model is named "VGG", the generated code file should
be named "VGG.py", and the class name inside should remain "CaffeNet".

The module "VGG" must be directly importable, so either put it inside the
[save_graphdef](save_graphdef) folder or add its location to "sys.path".

To save the model variables, pass the path of the converted data file (e.g. VGG.npy)
via the parameter "--data-input-path".

A "VGG_frozen.pb" is also generated, with all variables converted into constants
in the saved graph.
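As a concrete usage example (illustrative only; it assumes "VGG.py" has been placed where it can be imported, as described above, and that "VGG.npy" is in the current directory):

python save_model.py VGG --data-input-path VGG.npy

This writes VGG.pbtxt and VGG.pb (the GraphDef), a VGG.ckpt checkpoint holding the loaded variables, and VGG_frozen.pb, as the script below shows.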
@@ -0,0 +1,51 @@
#!/usr/bin/env python
import argparse
import os.path as osp
import sys

import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib


def save(name, data_input_path):
    def getpardir(path): return osp.split(path)[0]
    # Put the directory two levels above this script on sys.path so the
    # converted model module can be found
    sys.path.append(getpardir(getpardir(getpardir(osp.realpath(__file__)))))

    # Import the converted model's class
    caffe_net_module = __import__(name)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        image_input = tf.placeholder(tf.float32, shape=[1, 227, 227, 3], name="data")
        net = caffe_net_module.CaffeNet({'data': image_input})

        # Save the graph definition as both text (.pbtxt) and binary (.pb) protocol buffers
        pb_name = name + '.pb'
        tf.train.write_graph(sess.graph_def, '.', pb_name + 'txt', True)
        tf.train.write_graph(sess.graph_def, '.', pb_name, False)

        if data_input_path is not None:
            # Load the converted data
            sess.run(tf.global_variables_initializer())
            net.load(data_input_path, sess)

            # Save the variables to a checkpoint
            saver = saver_lib.Saver(tf.global_variables())
            checkpoint_prefix = osp.join(osp.curdir, name + '.ckpt')
            checkpoint_path = saver.save(sess, checkpoint_prefix)

            # Freeze the graph: fold the checkpointed variables into constants
            freeze_graph.freeze_graph(pb_name, "",
                                      True, checkpoint_path, 'fc8/fc8',
                                      'save/restore_all', 'save/Const:0',
                                      name + '_frozen.pb', False, "")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('name', help='Name of the converted model')
    parser.add_argument('--data-input-path', help='Converted data input path')
    args = parser.parse_args()
    save(args.name, args.data_input_path)


if __name__ == '__main__':
    main()
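Once VGG_frozen.pb has been produced, it can be consumed with the standard TensorFlow 1.x APIs. A minimal sketch, assuming the "VGG" example above and the input/output names defined in this script ('data' and 'fc8/fc8'); the zero-filled input is purely illustrative:

import numpy as np
import tensorflow as tf

# Read the frozen GraphDef and import it into a fresh graph.
graph_def = tf.GraphDef()
with tf.gfile.GFile('VGG_frozen.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    # Feed the 'data' placeholder and fetch the 'fc8/fc8' output node.
    dummy_image = np.zeros((1, 227, 227, 3), dtype=np.float32)
    logits = sess.run('fc8/fc8:0', feed_dict={'data:0': dummy_image})
    print(logits.shape)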
File renamed without changes.
@@ -14,8 +14,8 @@ def import_caffe(self):
             self.caffe = caffe
         except ImportError:
             # Fall back to the protobuf implementation
-            from . import caffepb
-            self.caffepb = caffepb
+            from . import caffe_pb2
+            self.caffepb = caffe_pb2
             show_fallback_warning()
         if self.caffe:
             # Use the protobuf code from the imported distribution.
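The fallback module is now caffe_pb2, the default name protoc gives to Python code generated from caffe.proto (e.g. protoc --python_out=. caffe.proto), so it can be regenerated against the installed python-protobuf (>= 3.2.0). A minimal sketch of using the fallback module directly to parse a network definition; the kaffe.caffe package path and the deploy.prototxt filename are assumptions for illustration:

from google.protobuf import text_format
from kaffe.caffe import caffe_pb2  # assumed location of the regenerated module

# Parse a Caffe network definition given in protobuf text format.
net_params = caffe_pb2.NetParameter()
with open('deploy.prototxt') as def_file:
    text_format.Merge(def_file.read(), net_params)
print(net_params.name, len(net_params.layer))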
@@ -130,11 +130,11 @@ def conv(self,
                 output = convolve(input, kernel)
             else:
                 # Split the input into groups and then convolve each of them independently
-                input_groups = tf.split(3, group, input)
-                kernel_groups = tf.split(3, group, kernel)
+                input_groups = tf.split(input, group, 3)
+                kernel_groups = tf.split(kernel, group, 3)
                 output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                 # Concatenate the groups
-                output = tf.concat(3, output_groups)
+                output = tf.concat(output_groups, 3)
             # Add the biases
             if biased:
                 biases = self.make_var('biases', [c_o])
@@ -177,7 +177,7 @@ def lrn(self, input, radius, alpha, beta, name, bias=1.0):
     @layer
     def concat(self, inputs, axis, name):
-        return tf.concat(concat_dim=axis, values=inputs, name=name)
+        return tf.concat(values=inputs, axis=axis, name=name)

     @layer
     def add(self, inputs, name):
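Both hunks above track the TensorFlow 1.0 argument reordering: tf.split(split_dim, num_split, value) became tf.split(value, num_or_size_splits, axis), and tf.concat(concat_dim, values) became tf.concat(values, axis). A minimal standalone sketch of the new calling convention (the shapes are illustrative only):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[1, 4, 4, 6], name='x')
# Split along the channel axis (axis 3) into two groups of 3 channels each.
groups = tf.split(x, 2, 3)
# Concatenate the groups back along the same axis.
merged = tf.concat(groups, 3)
print(merged.get_shape())  # (1, 4, 4, 6)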