# Dockerfile-Tensorflow-serving-gpu (forked from chesterkuo/DeepLearning_Docker)
FROM nvidia/cuda:8.0-cudnn6-devel-ubuntu16.04
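# Site-specific HTTP(S) proxy settings (Intel internal network); drop or
# adjust these when building outside that environment.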
ENV http_proxy http://proxy-chain.intel.com:911
ENV https_proxy https://proxy-chain.intel.com:912
ENV HTTP_PROXY http://proxy-chain.intel.com:911
ENV HTTPS_PROXY https://proxy-chain.intel.com:912
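# The package list below roughly covers TensorFlow's Python build
# requirements (python-dev, python-numpy, swig), Bazel's packaging needs
# (zip, zlib1g-dev), and serving's transport dependencies (libcurl3-dev,
# libzmq3-dev).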
RUN apt-get update && apt-get install -y \
        build-essential \
        curl \
        git \
        libfreetype6-dev \
        libpng12-dev \
        libzmq3-dev \
        pkg-config \
        python-dev \
        python-numpy \
        python-pip \
        software-properties-common \
        swig \
        zip \
        zlib1g-dev \
        libcurl3-dev \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py
# Set up grpc
RUN pip install enum34 futures mock six && \
    pip install --pre 'protobuf>=3.0.0a3' && \
    pip install -i https://testpypi.python.org/simple --pre grpcio
# Set up Bazel.
# Bazel needs JDK 8. The openjdk-r PPA (maintained by Matthias Klose,
# https://launchpad.net/~doko) is a holdover from when this image was based
# on Ubuntu 14.04 (trusty), which had no openjdk-8 backport; see
# https://bugs.launchpad.net/trusty-backports/+bug/1368094. The 16.04 base
# image used here already ships openjdk-8, so the PPA is redundant but
# harmless.
RUN add-apt-repository -y ppa:openjdk-r/ppa && \
    apt-get update && \
    apt-get install -y openjdk-8-jdk openjdk-8-jre-headless && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Running bazel inside a `docker build` command causes trouble, cf:
# https://github.com/bazelbuild/bazel/issues/134
# The easiest solution is to set up a bazelrc file forcing --batch.
RUN echo "startup --batch" >>/root/.bazelrc
# Similarly, we need to workaround sandboxing issues:
# https://github.com/bazelbuild/bazel/issues/418
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/root/.bazelrc
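# At this point /root/.bazelrc contains:
#   startup --batch
#   build --spawn_strategy=standalone --genrule_strategy=standalone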
ENV BAZELRC /root/.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.4.5
WORKDIR /
RUN mkdir /bazel && \
    cd /bazel && \
    curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    curl -fSsL -o /bazel/LICENSE https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
    chmod +x bazel-*.sh && \
    ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    cd / && \
    rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
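# Optional sanity check: uncomment to fail the build early if the Bazel
# install did not succeed.
#RUN bazel version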
# Download TensorFlow Serving. No explicit ref is given to git checkout, so
# the build tracks whatever the default branch points at; pin a tag or
# branch here for reproducible builds.
RUN git clone --recurse-submodules https://github.com/tensorflow/serving && \
    cd serving && \
    git checkout
# Build TensorFlow with the CUDA configuration
ENV CI_BUILD_PYTHON python
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
ENV TF_NEED_CUDA 1
ENV TF_CUDA_COMPUTE_CAPABILITIES 3.0,3.5,5.2,6.0,6.1
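# These capabilities span Kepler (3.0/3.5) through Maxwell (5.2, e.g.
# GTX 980/Titan X) and Pascal (6.0/6.1, e.g. P100/GTX 1080). Trimming the
# list to just your GPU's architecture shortens the build considerably.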
# Fix paths so that CUDNN can be found
# See https://github.com/tensorflow/tensorflow/issues/8264
# (debug) List the CUDA libraries so a failed link below is easier to diagnose.
RUN ls -lah /usr/local/cuda/lib64/*
RUN mkdir /usr/lib/x86_64-linux-gnu/include/ && \
    ln -s /usr/include/cudnn.h /usr/lib/x86_64-linux-gnu/include/cudnn.h && \
    ln -s /usr/include/cudnn.h /usr/local/cuda/include/cudnn.h && \
    ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so /usr/local/cuda/lib64/libcudnn.so && \
    ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.6 /usr/local/cuda/lib64/libcudnn.so.6
# Fix from https://github.com/tensorflow/serving/issues/327#issuecomment-282207825
WORKDIR /
RUN git clone https://github.com/NVIDIA/nccl.git && \
    cd nccl/ && \
    make CUDA_HOME=/usr/local/cuda && \
    make install && \
    mkdir -p /usr/local/include/external/nccl_archive/src && \
    ln -s /usr/local/include/nccl.h /usr/local/include/external/nccl_archive/src/nccl.h
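# TensorFlow's build pulls NCCL in through an @nccl_archive Bazel
# repository; the symlink above exposes the locally built header at the
# include path that build expects.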
# Configure Tensorflow to use the GPU
WORKDIR /serving/tensorflow
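# The "configured" wrapper script answers TensorFlow's interactive
# ./configure prompts non-interactively, reading the TF_NEED_CUDA and
# TF_CUDA_COMPUTE_CAPABILITIES settings exported above.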
RUN tensorflow/tools/ci_build/builds/configured GPU
# Build TensorFlow Serving and Install it in /usr/local/bin
WORKDIR /serving
RUN bazel build -c opt --config=cuda \
        --crosstool_top=@local_config_cuda//crosstool:toolchain \
        tensorflow_serving/model_servers:tensorflow_model_server && \
    cp bazel-bin/tensorflow_serving/model_servers/tensorflow_model_server /usr/local/bin/ && \
    bazel clean --expunge
#CMD ["/bin/bash"]
CMD /usr/local/bin/tensorflow_model_server --port=$TF_SERVING_PORT --model_name=$TF_SERVING_MODEL_NAME --model_base_path=$TF_SERVING_MODEL_PATH
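# Example invocation (hypothetical image tag, model name, and paths; adjust
# for your deployment). nvidia-docker 1.x matches the CUDA 8 era of this
# image; newer setups use "docker run --runtime=nvidia" instead.
#   docker build -t tf-serving-gpu -f Dockerfile-Tensorflow-serving-gpu .
#   nvidia-docker run -p 9000:9000 \
#     -e TF_SERVING_PORT=9000 \
#     -e TF_SERVING_MODEL_NAME=mnist \
#     -e TF_SERVING_MODEL_PATH=/models/mnist \
#     -v /path/to/exported/mnist:/models/mnist \
#     tf-serving-gpu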