Cherry-pick benchmark fix from #9170 into 3.19.x #9176

Merged
8 changes: 4 additions & 4 deletions benchmarks/Makefile.am
@@ -165,7 +165,7 @@ python_add_init: protoc_middleman protoc_middleman2
done \
done

-python_cpp_pkg_flags = `pkg-config --cflags --libs python`
+python_cpp_pkg_flags = `pkg-config --cflags --libs python3`

lib_LTLIBRARIES = libbenchmark_messages.la
libbenchmark_messages_la_SOURCES = python/python_benchmark_messages.cc
@@ -186,7 +186,7 @@ python-pure-python-benchmark: python_add_init
@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-pure-python-benchmark
@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'python\' >> python-pure-python-benchmark
@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-pure-python-benchmark
-@echo python tmp/py_benchmark.py '$$@' >> python-pure-python-benchmark
+@echo python3 tmp/py_benchmark.py '$$@' >> python-pure-python-benchmark
@chmod +x python-pure-python-benchmark

python-cpp-reflection-benchmark: python_add_init
@@ -196,7 +196,7 @@ python-cpp-reflection-benchmark: python_add_init
@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-cpp-reflection-benchmark
@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'cpp\' >> python-cpp-reflection-benchmark
@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-cpp-reflection-benchmark
-@echo python tmp/py_benchmark.py '$$@' >> python-cpp-reflection-benchmark
+@echo python3 tmp/py_benchmark.py '$$@' >> python-cpp-reflection-benchmark
@chmod +x python-cpp-reflection-benchmark

python-cpp-generated-code-benchmark: python_add_init libbenchmark_messages.la
@@ -206,7 +206,7 @@ python-cpp-generated-code-benchmark: python_add_init libbenchmark_messages.la
@echo export DYLD_LIBRARY_PATH=$(top_srcdir)/src/.libs >> python-cpp-generated-code-benchmark
@echo export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=\'cpp\' >> python-cpp-generated-code-benchmark
@echo cp $(srcdir)/python/py_benchmark.py tmp >> python-cpp-generated-code-benchmark
-@echo python tmp/py_benchmark.py --cpp_generated '$$@' >> python-cpp-generated-code-benchmark
+@echo python3 tmp/py_benchmark.py --cpp_generated '$$@' >> python-cpp-generated-code-benchmark
@chmod +x python-cpp-generated-code-benchmark

python-pure-python: python-pure-python-benchmark
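
Note on the Makefile.am changes above: the pkg-config package is switched from python (which resolves to Python 2 on most distributions) to python3, and the generated wrapper scripts now invoke python3 explicitly, so the libbenchmark_messages extension is compiled against, and run under, the same Python 3 interpreter. A minimal standard-library sketch for cross-checking this locally (the helper script is illustrative and not part of this change):

# check_python_build_env.py -- illustrative helper, not part of this PR.
# Prints the header and library configuration of the interpreter that will
# run the benchmarks; the values should line up with what
# `pkg-config --cflags --libs python3` reports at build time.
import sysconfig

print("include dir :", sysconfig.get_paths()["include"])
print("version     :", sysconfig.get_python_version())
print("libdir      :", sysconfig.get_config_var("LIBDIR"))
print("ldlibrary   :", sysconfig.get_config_var("LDLIBRARY"))
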
23 changes: 13 additions & 10 deletions benchmarks/python/python_benchmark_messages.cc
@@ -7,23 +7,26 @@
#include "datasets/google_message3/benchmark_message3.pb.h"
#include "datasets/google_message4/benchmark_message4.pb.h"

-static PyMethodDef python_benchmark_methods[] = {
-{NULL, NULL, 0, NULL} /* Sentinel */
-};

+static struct PyModuleDef _module = {PyModuleDef_HEAD_INIT,
+"libbenchmark_messages",
+"Benchmark messages Python module",
+-1,
+NULL,
+NULL,
+NULL,
+NULL,
+NULL};

extern "C" {
PyMODINIT_FUNC
-initlibbenchmark_messages() {
+PyInit_libbenchmark_messages() {
benchmarks::BenchmarkDataset().descriptor();
benchmarks::proto3::GoogleMessage1().descriptor();
benchmarks::proto2::GoogleMessage1().descriptor();
benchmarks::proto2::GoogleMessage2().descriptor();
benchmarks::google_message3::GoogleMessage3().descriptor();
benchmarks::google_message4::GoogleMessage4().descriptor();

-PyObject *m;
-
-m = Py_InitModule("libbenchmark_messages", python_benchmark_methods);
-if (m == NULL)
-return;
+return PyModule_Create(&_module);
}
}
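
Note on python_benchmark_messages.cc: Python 3 extension modules export PyInit_<name> and return the module object created by PyModule_Create from a PyModuleDef, instead of the Python 2 init<name>/Py_InitModule pair, so a module built only with the old entry point cannot be imported by a Python 3 interpreter. A sketch of loading the rebuilt extension (the path and error handling are assumptions for illustration, not taken from this PR):

# Illustrative only: load the rebuilt extension and surface a clear error
# if it was compiled for the wrong interpreter.
import sys

sys.path.append("tmp")  # adjust to wherever the built libbenchmark_messages extension lives

try:
    # Importing the module runs PyInit_libbenchmark_messages, which touches
    # each generated descriptor so the C++ message types are registered.
    import libbenchmark_messages
except ImportError as err:
    sys.exit("libbenchmark_messages not importable under Python %s: %s"
             % (sys.version.split()[0], err))
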
12 changes: 6 additions & 6 deletions benchmarks/util/result_parser.py
@@ -61,7 +61,7 @@ def __parse_cpp_result(filename):
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
data_filename = "".join(
@@ -96,7 +96,7 @@ def __parse_synthetic_result(filename):
return
if filename[0] != "/":
filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
with open(filename, "rb") as f:
with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for benchmark in results["benchmarks"]:
__results.append({
@@ -126,7 +126,7 @@ def __parse_python_result(filename):
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
with open(filename, encoding="utf-8") as f:
results_list = json.loads(f.read())
for results in results_list:
for result in results:
@@ -176,7 +176,7 @@ def __parse_java_result(filename):
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for result in results:
total_weight = 0
@@ -212,7 +212,7 @@ def __parse_go_result(filename):
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
with open(filename, encoding="utf-8") as f:
for line in f:
result_list = re.split(r"[\ \t]+", line)
if result_list[0][:9] != "Benchmark":
@@ -252,7 +252,7 @@ def __parse_custom_result(filename, language):
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename, "rb") as f:
with open(filename, encoding="utf-8") as f:
results = json.loads(f.read())
for result in results:
_, avg_size = __get_data_size(result["filename"])
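
Note on result_parser.py: the result files are JSON text, so they are now opened in text mode with an explicit UTF-8 encoding rather than "rb". json.loads does accept bytes, but decoding explicitly keeps the behaviour independent of locale settings, and the Go parser iterates over lines and splits them with a str regex, which would raise a TypeError on bytes under Python 3. The reading pattern, reduced to a sketch (function and path names are illustrative):

# Sketch of the reading pattern now used throughout result_parser.py.
import json

def load_results(path):
    # Decode the file explicitly as UTF-8 instead of passing raw bytes on.
    with open(path, encoding="utf-8") as f:
        return json.load(f)

# e.g. results = load_results("tmp/cpp_result.json")
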
8 changes: 5 additions & 3 deletions kokoro/linux/benchmark/run.sh
@@ -23,8 +23,10 @@ popd
./configure CXXFLAGS="-fPIC -O2"
make -j8
pushd python
-python setup.py build --cpp_implementation
-pip install . --user
+virtualenv -p python3 env
+source env/bin/activate
+python3 setup.py build --cpp_implementation
+pip3 install --install-option="--cpp_implementation" .
popd

# build and run Python benchmark
@@ -91,7 +93,7 @@ cat tmp/python_result.json
# print the postprocessed results to the build job log
# TODO(jtattermusch): re-enable uploading results to bigquery (it is currently broken)
make python_add_init
-env LD_LIBRARY_PATH="${repo_root}/src/.libs" python -m util.result_parser \
+env LD_LIBRARY_PATH="${repo_root}/src/.libs" python3 -m util.result_parser \
-cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" -python="../tmp/python_result.json"
popd
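
Note on kokoro/linux/benchmark/run.sh: the Python package is now built and installed inside a python3 virtualenv with the C++ implementation enabled, and the result parser is invoked with python3, so the job no longer depends on the system Python 2 or on --user site-packages. A small check that can be run inside the activated environment (illustrative, not part of the script):

# Illustrative sanity check for the activated virtualenv: confirm that the
# interpreter is Python 3 and that protobuf is using the C++ implementation.
import sys

from google.protobuf.internal import api_implementation

print("python interpreter :", sys.version.split()[0])
print("protobuf backend   :", api_implementation.Type())  # expected: 'cpp'
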