diff --git a/DockerfileCloud b/DockerfileCloud
index 5f3bb4b..c8c99ad 100644
--- a/DockerfileCloud
+++ b/DockerfileCloud
@@ -5,17 +5,17 @@ RUN apt-get install -y protobuf-compiler python-pil python-lxml python-tk
 RUN pip install pillow jupyter matplotlib
 WORKDIR /tensorflow
 RUN git clone https://github.com/tensorflow/models.git
-WORKDIR models
-WORKDIR research
+WORKDIR models/research
 RUN curl -OL https://github.com/google/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip
 RUN unzip protoc-3.0.0-linux-x86_64.zip
 RUN ./bin/protoc object_detection/protos/*.proto --python_out=.
 RUN echo "export PYTHONPATH=${PYTHONPATH}:`pwd`:`pwd`/slim" >> ~/.bashrc
 RUN python setup.py install
+WORKDIR slim
+RUN python setup.py install
 RUN git clone https://github.com/cocodataset/cocoapi.git
-WORKDIR cocoapi/PythonAPI
-RUN make
+WORKDIR cocoapi/PythonAPI
+RUN make install
 WORKDIR /tensorflow/models/research
-RUN cp -r cocoapi/PythonAPI/pycocotools .
 COPY volume/ volume/
-CMD exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
\ No newline at end of file
+CMD exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
diff --git a/README.md b/README.md
index 59bcf6a..784102a 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ The app recognizes now the red car, the blue car and phones.
 Follow the next steps to train models with your own objects.
 
 ### 1) Development Environment Setup
- 
+
 Invoke the following commands to download all necessary files and to set up a local development environment:
 
 ```bash
@@ -58,7 +58,7 @@ $ cd object-detection-anki-overdrive-cars
 $ my_project_dir=$(pwd)
 $ export PROJECT_DIR=$my_project_dir
 $ docker build -t tensorflow-od .
-$ cd $PROJECT_DIR/volume/data
+$ cd $PROJECT_DIR/data
 $ wget http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz
 $ tar xvzf ssd_mobilenet_v2_coco_2018_03_29.tar.gz
 $ cp -R ${PROJECT_DIR}/data ${PROJECT_DIR}/volume/data
@@ -97,12 +97,12 @@ In the Docker container invoke these commands:
 
 ```bash
 $ cd volume
-$ python create_tfrecord.py 
+$ python create_tfrecord.py
 $ exit
 ```
 
 
-### 3) Training of the Model 
+### 3) Training of the Model
 
 For testing purposes or if you have a machine with TensorFlow GPU support, you can train the models locally.
 
@@ -163,7 +163,7 @@ $ kubectl cp default/train-56cfd5b9f-8x6q4:/tensorflow/models/research/volume/mo
 ```
 
 
-### 4) Save the Model 
+### 4) Save the Model
 
 The training generates checkpoint files in the 'volume/train' directory. These files need to be converted into something that is called a frozen graph file. That file is used later in the iOS app and the Python notebook.
 
@@ -186,7 +186,7 @@ $ exit
 ```
 
 
-### 5) Testing of the Model 
+### 5) Testing of the Model
 
 Before using the model in the iOS app, you can test it in a Python notebook. You can also view the training results via Tensorboard.
 
@@ -247,4 +247,4 @@ Change the lines 63ff with your own IoT configuration.
 
 Redeploy the app to your iOS device.
 
-Rather than using the Node-RED flow of the original [project](https://github.com/IBM-Bluemix/node-mqtt-for-anki-overdrive) you need to deploy the version from this project [node-red-flow](node-red-flow) to your Node-RED instance.
\ No newline at end of file
+Rather than using the Node-RED flow of the original [project](https://github.com/IBM-Bluemix/node-mqtt-for-anki-overdrive) you need to deploy the version from this project [node-red-flow](node-red-flow) to your Node-RED instance.