Add batchinv function #192

Merged: 17 commits, merged on Dec 2, 2019

27 changes: 14 additions & 13 deletions CMakeLists.txt
@@ -87,20 +87,21 @@ endif()
include(${PROJECT_SOURCE_DIR}/build-tools/cmake/get_cuda_version.cmake)

set(CPACK_PACKAGE_NAME "nnabla-cpplib-cuda${CUDA_VERSION}-cudnn${CUDNN_VERSION}")
find_program(LSB_RELEASE lsb_release)
message("LSB_RELEASE" ${LSB_RELEASE})
if(EXISTS ${LSB_RELEASE})

execute_process(COMMAND ${LSB_RELEASE} -si
OUTPUT_VARIABLE LSB_RELEASE_ID
OUTPUT_STRIP_TRAILING_WHITESPACE
)
execute_process(COMMAND ${LSB_RELEASE} -sr
OUTPUT_VARIABLE LSB_RELEASE_RELEASE
OUTPUT_STRIP_TRAILING_WHITESPACE
)
set(CPACK_SYSTEM_NAME ${LSB_RELEASE_ID}${LSB_RELEASE_RELEASE})
if(UNIX)
find_program(LSB_RELEASE lsb_release)
if(EXISTS ${LSB_RELEASE})
execute_process(COMMAND ${LSB_RELEASE} -si
OUTPUT_VARIABLE LSB_RELEASE_ID
OUTPUT_STRIP_TRAILING_WHITESPACE
)
execute_process(COMMAND ${LSB_RELEASE} -sr
OUTPUT_VARIABLE LSB_RELEASE_RELEASE
OUTPUT_STRIP_TRAILING_WHITESPACE
)
set(CPACK_SYSTEM_NAME ${LSB_RELEASE_ID}${LSB_RELEASE_RELEASE})
endif()
endif()

set(CPACK_PACKAGE_VERSION_MAJOR ${version_major})
set(CPACK_PACKAGE_VERSION_MINOR ${version_minor})
set(CPACK_PACKAGE_VERSION_PATCH ${version_patch})
18 changes: 12 additions & 6 deletions build-tools/code_generator/function_types.yaml
@@ -304,9 +304,9 @@ Broadcast:
Tile:
float: [float]
half: [Half]
# OneHot:
# float: [int, float]
# half: [int, Half]
OneHot:
float: [int, float]
half: [int, Half]
Flip:
float: [float]
half: [Half]
@@ -319,6 +319,9 @@ Sort:
Reshape:
float: [float]
half: [Half]
BatchInv:
float: [float]
# half: [Half]
MatrixDiag:
float: [float]
half: [Half]
@@ -351,9 +354,9 @@ RandomChoice:
# RandomCrop:
# float: [float]
# half: [Half]
# RandomFlip:
# float: [float]
# half: [Half]
RandomFlip:
float: [float]
half: [Half]
# RandomShift:
# float: [float]
# half: [Half]
@@ -417,6 +420,9 @@ FixedPointQuantize:
Pow2Quantize:
float: [float]
half: [Half]
MinMaxQuantize:
float: [float]
half: [Half]
TopNError:
float: [float, int]
half: [Half, int]
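
The BatchInv entry above is the core of this PR: it registers a float CUDA kernel for the existing BatchInv function (the half kernel stays commented out for now). Below is a minimal sketch of exercising the new kernel from Python, assuming the standard nnabla API (get_extension_context, F.batch_inv); the snippet is illustrative only and not part of this diff.

    # Sketch only: assumes nnabla and nnabla-ext-cuda are installed.
    import numpy as np
    import nnabla as nn
    import nnabla.functions as F
    from nnabla.ext_utils import get_extension_context

    # Select the cuDNN/CUDA backend so F.batch_inv dispatches to this extension.
    ctx = get_extension_context('cudnn', device_id='0')
    nn.set_default_context(ctx)

    # A batch of 4 random 3x3 matrices; batch_inv inverts each one independently.
    # Adding a multiple of the identity keeps them well away from singular.
    x = nn.Variable((4, 3, 3))
    x.d = np.random.randn(4, 3, 3).astype(np.float32) + 3 * np.eye(3, dtype=np.float32)
    y = F.batch_inv(x)
    y.forward()

    # Each slice should match numpy's inverse up to float32 tolerance
    # (only the float kernel is registered in the yaml above).
    print(np.allclose(y.d, np.linalg.inv(x.d), atol=1e-4))
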
5 changes: 0 additions & 5 deletions build-tools/code_generator/generate.py
@@ -26,17 +26,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import os
from os.path import abspath, dirname, join
import sys

# Set path to <NNabla root>/build-tools/code_generator to import the following two
from utils.common import check_update
from utils.type_conv import type_from_proto
import code_generator_utils as utils

import itertools

here = abspath(dirname(abspath(__file__)))
base = abspath(here + '/../..')
24 changes: 19 additions & 5 deletions build-tools/make/build-with-docker.mk
@@ -25,6 +25,7 @@ DOCKER_RUN_OPTS += -e NNABLA_DIRECTORY=$(NNABLA_DIRECTORY)
NNABLA_EXT_CUDA_DIRECTORY ?= $(shell pwd)
DOCKER_RUN_OPTS += -e NNABLA_EXT_CUDA_DIRECTORY=$(NNABLA_EXT_CUDA_DIRECTORY)
DOCKER_RUN_OPTS += -e CMAKE_OPTS=$(CMAKE_OPTS)
DOCKER_RUN_OPTS += -e INCLUDE_CUDA_CUDNN_LIB_IN_WHL=$(INCLUDE_CUDA_CUDNN_LIB_IN_WHL)

include $(NNABLA_EXT_CUDA_DIRECTORY)/build-tools/make/options.mk
ifndef NNABLA_BUILD_INCLUDED
@@ -35,7 +36,10 @@ ifndef NNABLA_BUILD_WITH_DOCKER_INCLUDED
include $(NNABLA_DIRECTORY)/build-tools/make/build-with-docker.mk
endif

NVIDIA_DOCKER_WRAPPER=$(NNABLA_EXT_CUDA_DIRECTORY)/build-tools/scripts/nvidia-docker.sh

CUDA_SUFFIX = $(CUDA_VERSION_MAJOR)$(CUDA_VERSION_MINOR)-cudnn$(CUDNN_VERSION)
CUDA_VERSION_MAJOR_MINOR = $(CUDA_VERSION_MAJOR).$(CUDA_VERSION_MINOR)

DOCKER_IMAGE_ID_BUILD_NNABLA_EXT_CUDA = $(shell md5sum $(NNABLA_EXT_CUDA_DIRECTORY)/docker/development/Dockerfile.build$(ARCH_SUFFIX) |cut -d \ -f 1)
DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA ?= $(DOCKER_IMAGE_NAME_BASE)-build-cuda$(CUDA_SUFFIX)$(ARCH_SUFFIX):$(DOCKER_IMAGE_ID_BUILD_NNABLA_EXT_CUDA)
@@ -64,6 +68,8 @@ docker_image_build_cuda$(DOCKER_IMAGE_TARGET_SUFFIX):
docker pull $(DOCKER_IMAGE_BUILD_CUDA_BASE) && \
(cd $(NNABLA_EXT_CUDA_DIRECTORY) && docker build $(DOCKER_BUILD_ARGS)\
--build-arg BASE=$(DOCKER_IMAGE_BUILD_CUDA_BASE) \
--build-arg CUDA_VERSION_MAJOR_MINOR=$(CUDA_VERSION_MAJOR_MINOR) \
--build-arg ARCH_SUFFIX=$(ARCH_SUFFIX) \
-t $(DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA) \
-f docker/development/Dockerfile.build$(ARCH_SUFFIX) \
.) \
@@ -75,6 +81,8 @@ docker_image_build_cuda_multi_gpu$(DOCKER_IMAGE_TARGET_SUFFIX):
docker pull $(DOCKER_IMAGE_BUILD_CUDA_MULTI_GPU_BASE) && \
(cd $(NNABLA_EXT_CUDA_DIRECTORY) && docker build $(DOCKER_BUILD_ARGS) \
--build-arg BASE=$(DOCKER_IMAGE_BUILD_CUDA_MULTI_GPU_BASE) \
--build-arg CUDA_VERSION_MAJOR_MINOR=$(CUDA_VERSION_MAJOR_MINOR) \
--build-arg ARCH_SUFFIX=$(ARCH_SUFFIX) \
-t $(DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA_MULTI_GPU) \
-f docker/development/Dockerfile.build-multi-gpu$(ARCH_SUFFIX) \
.) \
@@ -147,17 +155,17 @@ bwd-nnabla-ext-cuda-wheel-multi-gpu-only: docker_image_build_cuda_multi_gpu
.PHONY: bwd-nnabla-ext-cuda-test
bwd-nnabla-ext-cuda-test: docker_image_build_cuda
cd $(NNABLA_EXT_CUDA_DIRECTORY) \
&& nvidia-docker run $(DOCKER_RUN_OPTS) $(DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA) make -f build-tools/make/build.mk nnabla-ext-cuda-test-local
&& ${NVIDIA_DOCKER_WRAPPER} run $(DOCKER_RUN_OPTS) $(DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA) make -f build-tools/make/build.mk nnabla-ext-cuda-test-local

.PHONY: bwd-nnabla-ext-cuda-multi-gpu-test
bwd-nnabla-ext-cuda-multi-gpu-test: docker_image_build_cuda_multi_gpu
cd $(NNABLA_EXT_CUDA_DIRECTORY) \
&& nvidia-docker run $(DOCKER_RUN_OPTS) $(DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA_MULTI_GPU) make -f build-tools/make/build.mk nnabla-ext-cuda-multi-gpu-test-local
&& ${NVIDIA_DOCKER_WRAPPER} run $(DOCKER_RUN_OPTS) $(DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA_MULTI_GPU) make -f build-tools/make/build.mk nnabla-ext-cuda-multi-gpu-test-local

.PHONY: bwd-nnabla-ext-cuda-shell
bwd-nnabla-ext-cuda-shell: docker_image_build_cuda
cd $(NNABLA_EXT_CUDA_DIRECTORY) \
&& nvidia-docker run $(DOCKER_RUN_OPTS) -it --rm ${DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA} make nnabla-ext-cuda-shell
&& ${NVIDIA_DOCKER_WRAPPER} run $(DOCKER_RUN_OPTS) -it --rm ${DOCKER_IMAGE_BUILD_NNABLA_EXT_CUDA} make nnabla-ext-cuda-shell

########################################################################################################################
# Docker image with current nnabla
@@ -173,7 +181,10 @@ docker_image_nnabla_ext_cuda:
&& cp $(BUILD_EXT_CUDA_DIRECTORY_WHEEL)/dist/*.whl . \
&& echo ADD $(shell basename $(BUILD_EXT_CUDA_DIRECTORY_WHEEL)/dist/*.whl) /tmp/ >>Dockerfile \
&& echo RUN pip install /tmp/$(shell basename $(BUILD_EXT_CUDA_DIRECTORY_WHEEL)/dist/*.whl) >>Dockerfile \
&& docker build --build-arg BASE=$${BASE} $(DOCKER_BUILD_ARGS) -t $(DOCKER_IMAGE_NNABLA_EXT_CUDA) . \
&& docker build --build-arg BASE=$${BASE} $(DOCKER_BUILD_ARGS) \
--build-arg CUDA_VERSION_MAJOR_MINOR=$(CUDA_VERSION_MAJOR_MINOR) \
--build-arg ARCH_SUFFIX=$(ARCH_SUFFIX) \
-t $(DOCKER_IMAGE_NNABLA_EXT_CUDA) . \
&& rm -f $(shell basename $(BUILD_DIRECTORY_WHEEL)/dist/*.whl) \
&& rm -f $(shell basename $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU)/dist/*.whl) \
&& rm -f Dockerfile
@@ -191,7 +202,10 @@ docker_image_nnabla_ext_cuda_multi_gpu: bwd-nnabla-ext-cuda-wheel-multi-gpu
&& cp $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU)/dist/*.whl . \
&& echo ADD $(shell basename $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU)/dist/*.whl) /tmp/ >>Dockerfile \
&& echo RUN pip install /tmp/$(shell basename $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU)/dist/*.whl) >>Dockerfile \
&& docker build --build-arg BASE=$${BASE} $(DOCKER_BUILD_ARGS) -t $(DOCKER_IMAGE_NNABLA_EXT_CUDA_MULTI_GPU) . \
&& docker build --build-arg BASE=$${BASE} $(DOCKER_BUILD_ARGS) \
--build-arg CUDA_VERSION_MAJOR_MINOR=$(CUDA_VERSION_MAJOR_MINOR) \
--build-arg ARCH_SUFFIX=$(ARCH_SUFFIX) \
-t $(DOCKER_IMAGE_NNABLA_EXT_CUDA_MULTI_GPU) . \
&& rm -f $(shell basename $(BUILD_DIRECTORY_WHEEL)/dist/*.whl) \
&& rm -f $(shell basename $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU)/dist/*.whl) \
&& rm -f Dockerfile
8 changes: 5 additions & 3 deletions build-tools/make/build.mk
@@ -181,15 +181,17 @@ nnabla-ext-cuda-test:
nnabla-ext-cuda-test-local: nnabla-install nnabla-ext-cuda-install
cd $(BUILD_EXT_CUDA_DIRECTORY_WHEEL) \
&& PYTHONPATH=$(NNABLA_EXT_CUDA_DIRECTORY)/python/test \
python -m pytest $(NNABLA_DIRECTORY)/python/test
&& $(NNABLA_DIRECTORY)/build-tools/make/pytest.sh $(NNABLA_DIRECTORY)/python/test \
&& $(NNABLA_DIRECTORY)/build-tools/make/pytest.sh $(NNABLA_EXT_CUDA_DIRECTORY)/python/test

.PHONY: nnabla-ext-cuda-multi-gpu-test-local
nnabla-ext-cuda-multi-gpu-test-local: nnabla-ext-cuda-multi-gpu-install
cd $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU) \
&& PYTHONPATH=$(NNABLA_EXT_CUDA_DIRECTORY)/python/test:$(NNABLA_DIRECTORY)/python/test \
mpiexec -q -n 2 python -m pytest \
mpiexec -q -n 2 $(NNABLA_DIRECTORY)/build-tools/make/pytest.sh \
--test-communicator \
--communicator-gpus=0,1 \
$(NNABLA_DIRECTORY)/python/test/communicator
cd $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU) \
&& python -m pytest $(NNABLA_DIRECTORY)/python/test
&& $(NNABLA_DIRECTORY)/build-tools/make/pytest.sh $(NNABLA_DIRECTORY)/python/test \
&& $(NNABLA_DIRECTORY)/build-tools/make/pytest.sh $(NNABLA_EXT_CUDA_DIRECTORY)/python/test
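
The revised test targets point the nnabla pytest wrapper (build-tools/make/pytest.sh in the nnabla repository) at both python/test trees, so the new CUDA function tests run alongside the core ones. As a rough illustration of the kind of check such a CUDA function test performs, a hypothetical pytest sketch assuming the nnabla Python API (not the repository's actual test code):

    # Hypothetical test sketch; the real tests live under python/test in both repos.
    import numpy as np
    import pytest
    import nnabla as nn
    import nnabla.functions as F
    from nnabla.ext_utils import get_extension_context

    @pytest.mark.parametrize("batch, n", [(2, 2), (8, 5)])
    def test_batch_inv_forward_cuda(batch, n):
        ctx = get_extension_context('cudnn', device_id='0')
        rng = np.random.RandomState(313)
        # Shift by a multiple of the identity so every matrix in the batch is invertible.
        data = rng.randn(batch, n, n).astype(np.float32) + 2 * n * np.eye(n, dtype=np.float32)
        with nn.context_scope(ctx):
            x = nn.Variable.from_numpy_array(data)
            y = F.batch_inv(x)
            y.forward()
        # Compare against numpy's batched inverse within float32 tolerance.
        assert np.allclose(y.d, np.linalg.inv(data), atol=1e-4)
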
4 changes: 2 additions & 2 deletions build-tools/make/options.mk
@@ -26,8 +26,8 @@ DOCKER_RUN_OPTS += -e CUDA_VERSION_MINOR=$(CUDA_VERSION_MINOR)
export CUDNN_VERSION ?= 7
DOCKER_RUN_OPTS += -e CUDNN_VERSION=$(CUDNN_VERSION)

export WHL_NO_PREFIX ?= False
DOCKER_RUN_OPTS += -e WHL_NO_PREFIX=$(WHL_NO_PREFIX)
export WHL_NO_CUDA_SUFFIX ?= False
DOCKER_RUN_OPTS += -e WHL_NO_CUDA_SUFFIX=$(WHL_NO_CUDA_SUFFIX)

ifndef NNABLA_OPTIONS_INCLUDED
include $(NNABLA_DIRECTORY)/build-tools/make/options.mk
19 changes: 19 additions & 0 deletions build-tools/scripts/nvidia-docker.sh
@@ -0,0 +1,19 @@
#!/bin/bash

if which nvidia-docker >/dev/null
then
nvidia-docker "$@"
else
if [ "$1" == "run" ]
then
GPU=\"device=${NV_GPU}\"
if [ "${NV_GPU}" == "" ]
then
GPU=all
fi
docker run --gpus=$GPU "${@:2}"
else
docker "$@"
fi
fi
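
This wrapper lets the bwd-* targets run on hosts without the legacy nvidia-docker CLI: when the command is missing, run invocations fall back to plain docker with the native --gpus flag introduced in Docker 19.03, mapping the NV_GPU device list (the variable nvidia-docker honours) and defaulting to all GPUs when NV_GPU is unset; any other subcommand is passed straight to docker. The escaped quotes around device=... match the quoted form Docker expects when a comma-separated device list is passed to --gpus.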

6 changes: 6 additions & 0 deletions docker/development/Dockerfile.build
@@ -149,6 +149,8 @@ ENV PYVERNAME=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}
# Limitation: numpy>=1.17 does not support python2.7
ADD python/requirements.txt /tmp/deps/

ARG CUDA_VERSION_MAJOR_MINOR
ARG ARCH_SUFFIX
RUN umask 0 \
&& cd /tmp/deps \
&& wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
@@ -159,6 +161,10 @@ RUN umask 0 \
&& conda activate nnabla-build \
&& pip install numpy\<1.17 \
&& pip install --only-binary -U -r /tmp/deps/requirements.txt \
&& ( [ "$CUDA_VERSION_MAJOR_MINOR" = "10.0" ] || [ "$CUDA_VERSION_MAJOR_MINOR" = "9.0" ] \
&& [ "x$ARCH_SUFFIX" = "x" ] \
&& pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/cuda/${CUDA_VERSION_MAJOR_MINOR} nvidia-dali \
|| echo "Skip DALI installation (CUDA=$CUDA_VERSION_MAJOR_MINOR ARCH=$ARCH_SUFFIX)" ) \
&& conda clean -y --all \
&& cd / \
&& rm -rf /tmp/*
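
The guarded pip install of nvidia-dali above (the same pattern is repeated in the multi-GPU and runtime Dockerfiles below) is wrapped in a subshell with && / || so the RUN step never aborts: DALI is pulled from NVIDIA's extra index only when the image targets CUDA 9.0 or 10.0 on x86 (empty ARCH_SUFFIX), and any other combination, or a failed install, just prints the skip message. This is also why CUDA_VERSION_MAJOR_MINOR and ARCH_SUFFIX are now passed as build args from build-with-docker.mk.
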
6 changes: 6 additions & 0 deletions docker/development/Dockerfile.build-multi-gpu
@@ -79,6 +79,8 @@ ENV PYVERNAME=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}
# Limitation: numpy>=1.17 does not support python2.7
ADD python/requirements.txt /tmp/deps/

ARG CUDA_VERSION_MAJOR_MINOR
ARG ARCH_SUFFIX
RUN umask 0 \
&& cd /tmp/deps \
&& wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
@@ -89,6 +91,10 @@ RUN umask 0 \
&& conda activate nnabla-build \
&& pip install numpy\<1.17 \
&& pip install --only-binary -U -r /tmp/deps/requirements.txt \
&& ( [ "$CUDA_VERSION_MAJOR_MINOR" = "10.0" ] || [ "$CUDA_VERSION_MAJOR_MINOR" = "9.0" ] \
&& [ "x$ARCH_SUFFIX" = "x" ] \
&& pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/cuda/${CUDA_VERSION_MAJOR_MINOR} nvidia-dali \
|| echo "Skip DALI installation (CUDA=$CUDA_VERSION_MAJOR_MINOR ARCH=$ARCH_SUFFIX)" ) \
&& conda clean -y --all \
&& cd / \
&& rm -rf /tmp/*
6 changes: 6 additions & 0 deletions docker/runtime/Dockerfile.runtime
@@ -13,6 +13,8 @@ ARG PYTHON_VERSION_MAJOR
ARG PYTHON_VERSION_MINOR
ENV PYVERNAME=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}

ARG CUDA_VERSION_MAJOR_MINOR
ARG ARCH_SUFFIX
RUN umask 0 \
&& cd /tmp/deps \
&& wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
@@ -21,6 +23,10 @@ RUN umask 0 \
&& . /opt/miniconda3/bin/activate \
&& conda create -n nnabla-build python=${PYVERNAME} \
&& conda activate nnabla-build \
&& ( [ "$CUDA_VERSION_MAJOR_MINOR" = "10.0" ] || [ "$CUDA_VERSION_MAJOR_MINOR" = "9.0" ] \
&& [ "x$ARCH_SUFFIX" = "x" ] \
&& pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/cuda/${CUDA_VERSION_MAJOR_MINOR} nvidia-dali \
|| echo "Skip DALI installation (CUDA=$CUDA_VERSION_MAJOR_MINOR ARCH=$ARCH_SUFFIX)" ) \
&& conda clean -y --all \
&& cd / \
&& rm -rf /tmp/*
6 changes: 6 additions & 0 deletions docker/runtime/Dockerfile.runtime-multi-gpu
@@ -15,6 +15,8 @@ ENV PYVERNAME=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}

ADD python/requirements.txt /tmp/deps/

ARG CUDA_VERSION_MAJOR_MINOR
ARG ARCH_SUFFIX
RUN umask 0 \
&& cd /tmp/deps \
&& wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
@@ -23,6 +25,10 @@ RUN umask 0 \
&& . /opt/miniconda3/bin/activate \
&& conda create -n nnabla-build python=${PYVERNAME} \
&& conda activate nnabla-build \
&& ( [ "$CUDA_VERSION_MAJOR_MINOR" = "10.0" ] || [ "$CUDA_VERSION_MAJOR_MINOR" = "9.0" ] \
&& [ "x$ARCH_SUFFIX" = "x" ] \
&& pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/cuda/${CUDA_VERSION_MAJOR_MINOR} nvidia-dali \
|| echo "Skip DALI installation (CUDA=$CUDA_VERSION_MAJOR_MINOR ARCH=$ARCH_SUFFIX)" ) \
&& pip install --only-binary -U -r /tmp/deps/requirements.txt \
&& conda clean -y --all \
&& cd / \
18 changes: 16 additions & 2 deletions examples/cpp/mnist_collection/train_dcgan_cuda.cpp
@@ -19,6 +19,7 @@ using namespace std;
using namespace nbla;

#include <dcgan_training.hpp>
#include <string.h>

/******************************************/
// Example of mnist training
@@ -31,8 +32,21 @@ int main(int argc, char *argv[]) {
{"cudnn:float", "cuda:float", "cpu:float"}, "CudaCachedArray", "0"};

// Execute training
if (!dcgan_training(ctx)) {
return (-1);
if (argc < 2 || strcmp(argv[1], "--static") == 0) {
std::cout << "Execute training with static graph" << std::endl;
if (!dcgan_training_with_static_graph(ctx)) {
return -1;
}
} else if (strcmp(argv[1], "--dynamic") == 0) {
std::cout << "Execute training with dynamic graph" << std::endl;
if (!dcgan_training_with_dynamic_graph(ctx)) {
return -1;
}
} else {
std::cerr << std::endl;
std::cerr << "Usage: " << argv[0] << " --static / --dynamic " << std::endl;
std::cerr << std::endl;
return -1;
}

return 0;
18 changes: 16 additions & 2 deletions examples/cpp/mnist_collection/train_lenet_classifier_cuda.cpp
@@ -19,6 +19,7 @@ using namespace std;
using namespace nbla;

#include <lenet_training.hpp>
#include <string.h>

/******************************************/
// Example of lenet training
@@ -31,8 +32,21 @@ int main(int argc, char *argv[]) {
{"cudnn:float", "cuda:float", "cpu:float"}, "CudaCachedArray", "0"};

// Execute training
if (!lenet_training(ctx)) {
return (-1);
if (argc < 2 || strcmp(argv[1], "--static") == 0) {
std::cout << "Execute training with static graph" << std::endl;
if (!lenet_training_with_static_graph(ctx)) {
return -1;
}
} else if (strcmp(argv[1], "--dynamic") == 0) {
std::cout << "Execute training with dynamic graph" << std::endl;
if (!lenet_training_with_dynamic_graph(ctx)) {
return -1;
}
} else {
std::cerr << std::endl;
std::cerr << "Usage: " << argv[0] << " --static / --dynamic " << std::endl;
std::cerr << std::endl;
return -1;
}

return 0;