diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ff124d3..a507961 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,22 +20,18 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -e .[testing] - pip install matplotlib numpy opencv-python pdbpp tensorboard tensorflow-cpu scipy Pillow gdown isort black flake8 - pip install mypy types-requests pytype pytest pytest-cov pre-commit pytest-mock pytest-flask + pip install -e .[tfcpu,api,dev,testing,linting] - name: Test with pytest run: | pip install python-coveralls - python -m pytest --cov=./dfp --cov-report term-missing - pip install coveragepy-lcov - coveragepy-lcov --data_file_path .coverage --output_file_path lcov.info + python -m pytest --cov=./dfp --cov-report lcov:lcov.info - name: Build run: | - pip install setuptools sdist wheel twine - pip install -r requirements.txt - python setup.py sdist bdist_wheel + python -m pip install --upgrade pip + pip install -e .[tfcpu,api] + - uses: "marvinpinto/action-automatic-releases@latest" with: repo_token: "${{ secrets.GITHUB_TOKEN }}" @@ -43,6 +39,7 @@ jobs: files: | ./dist/*tar.gz ./dist/*.whl + - name: Publish distribution 📦 to PyPI uses: pypa/gh-action-pypi-publish@master with: diff --git a/.gitignore b/.gitignore index 2330309..8b735cc 100644 --- a/.gitignore +++ b/.gitignore @@ -146,3 +146,4 @@ model/ cov.info lcov.info *.h5 +out.jpg diff --git a/Dockerfile b/Dockerfile index 4afe338..ed907c1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,8 @@ RUN pip install -e .[tfgpu,api] # RUN gdown https://drive.google.com/uc?id=1czUSFvk6Z49H-zRikTc67g2HUUz4imON # RUN unzip log.zip # RUN rm log.zip -ADD log log +COPY docs/app.toml /docs/app.toml +ADD log/store log/store COPY resources /usr/local/resources RUN mv /usr/local/resources . diff --git a/README.md b/README.md index 071c096..3feadb3 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -# TF2DeepFloorplan [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [](https://colab.research.google.com/github/zcemycl/TF2DeepFloorplan/blob/master/deepfloorplan.ipynb) ![example workflow](https://github.com/zcemycl/TF2DeepFloorplan/actions/workflows/main.yml/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/zcemycl/TF2DeepFloorplan/badge.svg?branch=main)](https://coveralls.io/github/zcemycl/TF2DeepFloorplan?branch=main) - +# TF2DeepFloorplan [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [](https://colab.research.google.com/github/zcemycl/TF2DeepFloorplan/blob/master/deepfloorplan.ipynb) ![example workflow](https://github.com/zcemycl/TF2DeepFloorplan/actions/workflows/main.yml/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/zcemycl/TF2DeepFloorplan/badge.svg?branch=main)](https://coveralls.io/github/zcemycl/TF2DeepFloorplan?branch=main)[![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2Fzcemycl%2FTF2DeepFloorplan&count_bg=%2379C83D&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=false)](https://hits.seeyoufarm.com) This repo contains a basic procedure to train and deploy the DNN model suggested by the paper ['Deep Floor Plan Recognition using a Multi-task Network with Room-boundary-Guided Attention'](https://arxiv.org/abs/1908.11025). 
It rewrites the original code from [zlzeng/DeepFloorplan](https://github.com/zlzeng/DeepFloorplan) for newer versions of TensorFlow and Python.
Network Architectures from the paper:
@@ -36,8 +35,10 @@ pip install -e .[tfgpu,api,dev,testing,linting] ``` python -m dfp.train [--batchsize 2][--lr 1e-4][--epochs 1000] [--logdir 'log/store'][--modeldir 'model/store'] -[--saveTensorInterval 10][--saveModelInterval 20] -[--tfmodel 'subclass'/'func'] +[--save-tensor-interval 10][--save-model-interval 20] +[--tfmodel 'subclass'/'func'][--feature-channels 256 128 64 32] +[--backbone 'vgg16'/'mobilenetv1'/'mobilenetv2'/'resnet50'] +[--feature-names block1_pool block2_pool block3_pool block4_pool block5_pool] ``` - for example, ``` @@ -52,7 +53,11 @@ tensorboard --logdir=log/store ``` python -m dfp.convert2tflite [--modeldir model/store] [--tflitedir model/store/model.tflite] -[--quantize] +[--loadmethod 'log'/'none'/'pb'] +[--quantize][--tfmodel 'subclass'/'func'] +[--feature-channels 256 128 64 32] +[--backbone 'vgg16'/'mobilenetv1'/'mobilenetv2'/'resnet50'] +[--feature-names block1_pool block2_pool block3_pool block4_pool block5_pool] ``` 6. Download and unzip model from google drive, ``` @@ -70,6 +75,9 @@ python -m dfp.deploy [--image 'path/to/image'] [--loadmethod 'log'/'pb'/'tflite'] [--weight 'log/store/G'/'model/store'/'model/store/model.tflite'] [--tfmodel 'subclass'/'func'] +[--feature-channels 256 128 64 32] +[--backbone 'vgg16'/'mobilenetv1'/'mobilenetv2'/'resnet50'] +[--feature-names block1_pool block2_pool block3_pool block4_pool block5_pool] ``` - for example, ``` @@ -83,16 +91,20 @@ python -m dfp.deploy --image floorplan.jpg --weight log/store/G docker build -t tf_docker -f Dockerfile . docker run -d -p 1111:1111 tf_docker:latest docker run --gpus all -d -p 1111:1111 tf_docker:latest + +# special for hot reloading flask +docker run -v ${PWD}/src/dfp/app.py:/src/dfp/app.py -v ${PWD}/src/dfp/deploy.py:/src/dfp/deploy.py -d -p 1111:1111 tf_docker:latest +docker logs `docker ps | grep "tf_docker:latest" | awk '{ print $1 }'` --follow ``` 2. Call the api for output. ``` curl -H "Content-Type: application/json" --request POST \ - -d '{"uri":"https://cdn.cnn.com/cnnnext/dam/assets/200212132008-04-london-rental-market-intl-exlarge-169.jpg","colorize":1,"postprocess":0, "output":"/tmp"}' \ - http://0.0.0.0:1111/process --output /tmp/tmp.jpg + -d '{"uri":"https://cdn.cnn.com/cnnnext/dam/assets/200212132008-04-london-rental-market-intl-exlarge-169.jpg","colorize":1,"postprocess":0}' \ + http://0.0.0.0:1111/uri --output /tmp/tmp.jpg -curl --request POST -F "file=@resources/30939153.jpg;type=image/jpeg" \ - -F "postprocess=0" -F "colorize=0" -F "output=/tmp" http://0.0.0.0:1111/process --output out.jpg +curl --request POST -F "file=@resources/30939153.jpg" \ + -F "postprocess=0" -F "colorize=0" http://0.0.0.0:1111/upload --output out.jpg ``` 3. If you run `app.py` without docker, the second curl for file upload will not work. @@ -148,27 +160,27 @@ git branch -d xx-features ## Optimization - Backbone Comparison in Size -|Backbone|log|pb|tflite| -|---|---|---|---| -|VGG16|130.5Mb|119Mb|45.3Mb| -|MobileNetV1|102.1Mb|86.7Mb|50.2Mb| -|MobileNetV2|129.3Mb|94.4Mb|57.9Mb| -|ResNet50|214Mb|216Mb|107.2Mb| +|Backbone|log|pb|tflite|toml| +|---|---|---|---|---| +|VGG16|130.5Mb|119Mb|45.3Mb|[link](docs/experiments/vgg16/exp1)| +|MobileNetV1|102.1Mb|86.7Mb|50.2Mb|[link](docs/experiments/mobilenetv1/exp1)| +|MobileNetV2|129.3Mb|94.4Mb|57.9Mb|[link](docs/experiments/mobilenetv2/exp1)| +|ResNet50|214Mb|216Mb|107.2Mb|[link](docs/experiments/resnet50/exp1)| - Feature Selection Comparison in Size -|Backbone|Feature Names|log|pb|tflite| -|---|---|---|---|---| -|MobileNetV1|"conv_pw_1_relu",
"conv_pw_3_relu",
"conv_pw_5_relu",
"conv_pw_7_relu",
"conv_pw_13_relu"|102.1Mb|86.7Mb|50.2Mb| -|MobileNetV1|"conv_pw_1_relu",
"conv_pw_3_relu",
"conv_pw_5_relu",
"conv_pw_7_relu",
"conv_pw_12_relu"|84.5Mb|82.3Mb|49.2Mb| +|Backbone|Feature Names|log|pb|tflite|toml| +|---|---|---|---|---|---| +|MobileNetV1|"conv_pw_1_relu",
"conv_pw_3_relu",
"conv_pw_5_relu",
"conv_pw_7_relu",
"conv_pw_13_relu"|102.1Mb|86.7Mb|50.2Mb|[link](docs/experiments/mobilenetv1/exp1)| +|MobileNetV1|"conv_pw_1_relu",
"conv_pw_3_relu",
"conv_pw_5_relu",
"conv_pw_7_relu",
"conv_pw_12_relu"|84.5Mb|82.3Mb|49.2Mb|[link](docs/experiments/mobilenetv1/exp2)| - Feature Channels Comparison in Size -|Backbone|Channels|log|pb|tflite| -|---|---|---|---|---| -|VGG16|[256,128,64,32]|130.5Mb|119Mb|45.3Mb| -|VGG16|[128,64,32,16]|82.4Mb|81.6Mb|27.3Mb| -|VGG16|[32,32,32,32]|73.2Mb|67.5Mb|18.1Mb| +|Backbone|Channels|log|pb|tflite|toml| +|---|---|---|---|---|---| +|VGG16|[256,128,64,32]|130.5Mb|119Mb|45.3Mb|[link](docs/experiments/vgg16/exp1)| +|VGG16|[128,64,32,16]|82.4Mb|81.6Mb|27.3Mb|| +|VGG16|[32,32,32,32]|73.2Mb|67.5Mb|18.1Mb|[link](docs/experiments/vgg16/exp2)| - tfmot - Pruning (not working) diff --git a/deepfloorplan.ipynb b/deepfloorplan.ipynb index ab90fdf..f63ca68 100644 --- a/deepfloorplan.ipynb +++ b/deepfloorplan.ipynb @@ -74,6 +74,7 @@ "from dfp.utils.rgb_ind_convertor import *\n", "from dfp.utils.util import *\n", "from dfp.utils.legend import *\n", + "from dfp.utils.settings import *\n", "from dfp.deploy import *\n", "print(tf.test.is_gpu_available())\n", "print(tf.config.list_physical_devices('GPU'))" @@ -87,7 +88,11 @@ }, "outputs": [], "source": [ - "inp = mpimg.imread('./TF2DeepFloorplan/resources/30939153.jpg')" + "img_path = './TF2DeepFloorplan/resources/30939153.jpg'\n", + "inp = mpimg.imread(img_path)\n", + "args = parse_args(\"--tomlfile ./TF2DeepFloorplan/docs/notebook.toml\".split())\n", + "args = overwrite_args_with_toml(args)\n", + "args.image = img_path" ] }, { @@ -102,10 +107,6 @@ }, "outputs": [], "source": [ - "args = Namespace(image='./TF2DeepFloorplan/resources/30939153.jpg',\n", - " weight='./log/store/G',loadmethod='log',\n", - " postprocess=True,colorize=True,\n", - " save=None,tfmodel=\"subclass\")\n", "result = main(args)" ] }, diff --git a/docs/app.toml b/docs/app.toml new file mode 100644 index 0000000..e1dd0b5 --- /dev/null +++ b/docs/app.toml @@ -0,0 +1,10 @@ +tfmodel = 'subclass' +image = '' +postprocess = 1 +colorize = 1 +save = '' +weight = 'log/store/G' +loadmethod = 'log' +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] \ No newline at end of file diff --git a/docs/experiments/mobilenetv1/exp1/compress.toml b/docs/experiments/mobilenetv1/exp1/compress.toml new file mode 100644 index 0000000..913db4f --- /dev/null +++ b/docs/experiments/mobilenetv1/exp1/compress.toml @@ -0,0 +1,9 @@ +tfmodel = 'func' +modeldir = 'model/store_mobilenetv1_exp1' +tflitedir = 'model/store_mobilenetv1_exp1_model.tflite' +quantize = 1 +compress_mode = 'quantization' +loadmethod = 'pb' +feature_channels = [256, 128, 64, 32] +backbone = 'mobilenetv1' +feature_names = ["conv_pw_1_relu", "conv_pw_3_relu", "conv_pw_5_relu", "conv_pw_7_relu", "conv_pw_13_relu",] diff --git a/docs/experiments/mobilenetv1/exp1/deploy.toml b/docs/experiments/mobilenetv1/exp1/deploy.toml new file mode 100644 index 0000000..39ce21d --- /dev/null +++ b/docs/experiments/mobilenetv1/exp1/deploy.toml @@ -0,0 +1,14 @@ +tfmodel = 'func' +image = 'resources/30939153.jpg' +postprocess = 1 +colorize = 1 +save = '' +weight = 'log/store_mobilenetv1_exp1/G' +loadmethod = 'log' +# weight = 'model/store_mobilenetv1_exp1' +# loadmethod = 'pb' +# weight = 'model/store_mobilenetv1_exp1_model.tflite' +# loadmethod = 'tflite' +feature_channels = [256, 128, 64, 32] +backbone = 'mobilenetv1' +feature_names = ["conv_pw_1_relu", "conv_pw_3_relu", "conv_pw_5_relu", "conv_pw_7_relu", "conv_pw_13_relu",] diff --git a/docs/experiments/mobilenetv1/exp1/train.toml 
b/docs/experiments/mobilenetv1/exp1/train.toml new file mode 100644 index 0000000..ef0686c --- /dev/null +++ b/docs/experiments/mobilenetv1/exp1/train.toml @@ -0,0 +1,13 @@ +tfmodel = 'func' +batchsize = 8 +lr = 1e-4 +wd = 1e-5 +epochs = 100 +logdir = 'log/store_mobilenetv1_exp1' +modeldir = 'model/store_mobilenetv1_exp1' +weight = '' +save_tensor_interval = 10 +save_model_interval = 20 +feature_channels = [256, 128, 64, 32] +backbone = 'mobilenetv1' +feature_names = ["conv_pw_1_relu", "conv_pw_3_relu", "conv_pw_5_relu", "conv_pw_7_relu", "conv_pw_13_relu",] \ No newline at end of file diff --git a/docs/experiments/mobilenetv1/exp2/compress.toml b/docs/experiments/mobilenetv1/exp2/compress.toml new file mode 100644 index 0000000..47435dd --- /dev/null +++ b/docs/experiments/mobilenetv1/exp2/compress.toml @@ -0,0 +1,9 @@ +tfmodel = 'func' +modeldir = 'model/store_mobilenetv1_exp2' +tflitedir = 'model/store_mobilenetv1_exp2_model.tflite' +quantize = 1 +compress_mode = 'quantization' +loadmethod = 'pb' +feature_channels = [256, 128, 64, 32] +backbone = 'mobilenetv1' +feature_names = ["conv_pw_1_relu", "conv_pw_3_relu", "conv_pw_5_relu", "conv_pw_7_relu", "conv_pw_12_relu",] diff --git a/docs/experiments/mobilenetv1/exp2/deploy.toml b/docs/experiments/mobilenetv1/exp2/deploy.toml new file mode 100644 index 0000000..c78f855 --- /dev/null +++ b/docs/experiments/mobilenetv1/exp2/deploy.toml @@ -0,0 +1,14 @@ +tfmodel = 'func' +image = 'resources/30939153.jpg' +postprocess = 1 +colorize = 1 +save = '' +weight = 'log/store_mobilenetv1_exp2/G' +loadmethod = 'log' +# weight = 'model/store_mobilenetv1_exp2' +# loadmethod = 'pb' +# weight = 'model/store_mobilenetv1_exp2_model.tflite' +# loadmethod = 'tflite' +feature_channels = [256, 128, 64, 32] +backbone = 'mobilenetv1' +feature_names = ["conv_pw_1_relu", "conv_pw_3_relu", "conv_pw_5_relu", "conv_pw_7_relu", "conv_pw_12_relu",] diff --git a/docs/experiments/mobilenetv1/exp2/train.toml b/docs/experiments/mobilenetv1/exp2/train.toml new file mode 100644 index 0000000..f2cc469 --- /dev/null +++ b/docs/experiments/mobilenetv1/exp2/train.toml @@ -0,0 +1,13 @@ +tfmodel = 'func' +batchsize = 8 +lr = 1e-4 +wd = 1e-5 +epochs = 100 +logdir = 'log/store_mobilenetv1_exp2' +modeldir = 'model/store_mobilenetv1_exp2' +weight = '' +save_tensor_interval = 10 +save_model_interval = 20 +feature_channels = [256, 128, 64, 32] +backbone = 'mobilenetv1' +feature_names = ["conv_pw_1_relu", "conv_pw_3_relu", "conv_pw_5_relu", "conv_pw_7_relu", "conv_pw_12_relu",] \ No newline at end of file diff --git a/docs/experiments/mobilenetv2/exp1/train.toml b/docs/experiments/mobilenetv2/exp1/train.toml new file mode 100644 index 0000000..bf9ad78 --- /dev/null +++ b/docs/experiments/mobilenetv2/exp1/train.toml @@ -0,0 +1,13 @@ +tfmodel = 'func' +batchsize = 8 +lr = 1e-4 +wd = 1e-5 +epochs = 100 +logdir = 'log/store_mobilenetv2_exp1' +modeldir = 'model/store_mobilenetv2_exp1' +weight = '' +save_tensor_interval = 10 +save_model_interval = 20 +feature_channels = [256, 128, 64, 32] +backbone = 'mobilenetv2' +feature_names = ["block_1_expand_relu", "block_3_expand_relu", "block_5_expand_relu", "block_13_expand_relu", "out_relu",] \ No newline at end of file diff --git a/docs/experiments/resnet50/exp1/train.toml b/docs/experiments/resnet50/exp1/train.toml new file mode 100644 index 0000000..ce21237 --- /dev/null +++ b/docs/experiments/resnet50/exp1/train.toml @@ -0,0 +1,13 @@ +tfmodel = 'func' +batchsize = 8 +lr = 1e-4 +wd = 1e-5 +epochs = 100 +logdir = 'log/store_resnet50_exp1' +modeldir = 
'model/store_resnet50_exp1' +weight = '' +save_tensor_interval = 10 +save_model_interval = 20 +feature_channels = [256, 128, 64, 32] +backbone = 'resnet50' +feature_names = ["conv1_relu", "conv2_block3_out", "conv3_block4_out", "conv4_block6_out", "conv5_block3_out",] \ No newline at end of file diff --git a/docs/experiments/vgg16/exp1/compress.toml b/docs/experiments/vgg16/exp1/compress.toml new file mode 100644 index 0000000..31cfe1e --- /dev/null +++ b/docs/experiments/vgg16/exp1/compress.toml @@ -0,0 +1,9 @@ +tfmodel = 'subclass' +modeldir = 'model/store_vgg16_exp1' +tflitedir = 'model/store_vgg16_exp1_model.tflite' +quantize = 1 +compress_mode = 'quantization' +loadmethod = 'pb' +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] diff --git a/docs/experiments/vgg16/exp1/compress_log.toml b/docs/experiments/vgg16/exp1/compress_log.toml new file mode 100644 index 0000000..ee5be4c --- /dev/null +++ b/docs/experiments/vgg16/exp1/compress_log.toml @@ -0,0 +1,9 @@ +tfmodel = 'subclass' +modeldir = 'log/store_vgg16_exp1/G' +tflitedir = 'model/store_vgg16_exp1_model_log.tflite' +quantize = 1 +compress_mode = 'quantization' +loadmethod = 'log' +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] diff --git a/docs/experiments/vgg16/exp1/deploy.toml b/docs/experiments/vgg16/exp1/deploy.toml new file mode 100644 index 0000000..99a7acd --- /dev/null +++ b/docs/experiments/vgg16/exp1/deploy.toml @@ -0,0 +1,14 @@ +tfmodel = 'subclass' +image = 'resources/30939153.jpg' +postprocess = 1 +colorize = 1 +save = '' +# weight = 'log/store_vgg16_exp1/G' +# loadmethod = 'log' +# weight = 'model/store_vgg16_exp1' +# loadmethod = 'pb' +weight = 'model/store_vgg16_exp1_model.tflite' +loadmethod = 'tflite' +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] diff --git a/docs/experiments/vgg16/exp1/train.toml b/docs/experiments/vgg16/exp1/train.toml new file mode 100644 index 0000000..c8ba665 --- /dev/null +++ b/docs/experiments/vgg16/exp1/train.toml @@ -0,0 +1,13 @@ +tfmodel = 'subclass' +batchsize = 8 +lr = 1e-4 +wd = 1e-5 +epochs = 100 +logdir = 'log/store_vgg16_exp1' +modeldir = 'model/store_vgg16_exp1' +weight = '' +save_tensor_interval = 10 +save_model_interval = 20 +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] \ No newline at end of file diff --git a/docs/experiments/vgg16/exp2/compress.toml b/docs/experiments/vgg16/exp2/compress.toml new file mode 100644 index 0000000..d589636 --- /dev/null +++ b/docs/experiments/vgg16/exp2/compress.toml @@ -0,0 +1,9 @@ +tfmodel = 'subclass' +modeldir = 'model/store_vgg16_exp2' +tflitedir = 'model/store_vgg16_exp2_model.tflite' +quantize = 1 +compress_mode = 'quantization' +loadmethod = 'pb' +feature_channels = [32, 32, 32, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] diff --git a/docs/experiments/vgg16/exp2/deploy.toml b/docs/experiments/vgg16/exp2/deploy.toml new file mode 100644 index 0000000..2c4b0ff --- /dev/null +++ b/docs/experiments/vgg16/exp2/deploy.toml @@ -0,0 +1,14 @@ +tfmodel = 'subclass' +image = 'resources/30939153.jpg' +postprocess = 1 +colorize = 1 +save = '' +weight = 
'log/store_vgg16_exp2/G' +loadmethod = 'log' +# weight = 'model/store_vgg16_exp2' +# loadmethod = 'pb' +# weight = 'model/store_vgg16_exp2_model.tflite' +# loadmethod = 'tflite' +feature_channels = [32, 32, 32, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] diff --git a/docs/experiments/vgg16/exp2/train.toml b/docs/experiments/vgg16/exp2/train.toml new file mode 100644 index 0000000..ed75cb3 --- /dev/null +++ b/docs/experiments/vgg16/exp2/train.toml @@ -0,0 +1,13 @@ +tfmodel = 'subclass' +batchsize = 8 +lr = 1e-4 +wd = 1e-5 +epochs = 100 +logdir = 'log/store_vgg16_exp2' +modeldir = 'model/store_vgg16_exp2' +weight = '' +save_tensor_interval = 10 +save_model_interval = 20 +feature_channels = [32, 32, 32, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] \ No newline at end of file diff --git a/docs/experiments/vgg16/exp3/compress.toml b/docs/experiments/vgg16/exp3/compress.toml new file mode 100644 index 0000000..360f31f --- /dev/null +++ b/docs/experiments/vgg16/exp3/compress.toml @@ -0,0 +1,9 @@ +tfmodel = 'func' +modeldir = 'model/store_vgg16_exp3' +tflitedir = 'model/store_vgg16_exp3_model.tflite' +quantize = 1 +compress_mode = 'quantization' +loadmethod = 'pb' +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] diff --git a/docs/experiments/vgg16/exp3/deploy.toml b/docs/experiments/vgg16/exp3/deploy.toml new file mode 100644 index 0000000..98051cc --- /dev/null +++ b/docs/experiments/vgg16/exp3/deploy.toml @@ -0,0 +1,14 @@ +tfmodel = 'func' +image = 'resources/30939153.jpg' +postprocess = 1 +colorize = 1 +save = '' +# weight = 'log/store_vgg16_exp3/G' +# loadmethod = 'log' +weight = 'model/store_vgg16_exp3' +loadmethod = 'pb' +# weight = 'model/store_vgg16_exp3_model.tflite' +# loadmethod = 'tflite' +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] diff --git a/docs/experiments/vgg16/exp3/train.toml b/docs/experiments/vgg16/exp3/train.toml new file mode 100644 index 0000000..432de37 --- /dev/null +++ b/docs/experiments/vgg16/exp3/train.toml @@ -0,0 +1,13 @@ +tfmodel = 'func' +batchsize = 8 +lr = 1e-4 +wd = 1e-5 +epochs = 100 +logdir = 'log/store_vgg16_exp3' +modeldir = 'model/store_vgg16_exp3' +weight = '' +save_tensor_interval = 10 +save_model_interval = 20 +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] \ No newline at end of file diff --git a/docs/notebook.toml b/docs/notebook.toml new file mode 100644 index 0000000..524f11a --- /dev/null +++ b/docs/notebook.toml @@ -0,0 +1,10 @@ +tfmodel = 'subclass' +image = './TF2DeepFloorplan/resources/30939153.jpg' +postprocess = 1 +colorize = 1 +save = '' +weight = './log/store/G' +loadmethod = 'log' +feature_channels = [256, 128, 64, 32] +backbone = 'vgg16' +feature_names = ["block1_pool", "block2_pool", "block3_pool", "block4_pool", "block5_pool",] \ No newline at end of file diff --git a/out.jpg b/out.jpg new file mode 100644 index 0000000..4f1f15a Binary files /dev/null and b/out.jpg differ diff --git a/requirements.txt b/requirements.txt index 77e5272..b3e3c96 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,4 @@ protobuf==3.20.0 chardet types-requests pytype +dynaconf diff --git 
a/src/dfp/app.py b/src/dfp/app.py index 836c9a1..e1be974 100644 --- a/src/dfp/app.py +++ b/src/dfp/app.py @@ -8,11 +8,17 @@ import requests from flask import Flask, request, send_file from werkzeug.datastructures import FileStorage -from werkzeug.local import LocalProxy from .deploy import main +from .utils.settings import overwrite_args_with_toml app = Flask(__name__) +app.config["UPLOAD_EXTENSIONS"] = [".jpg", ".png", ".jpeg"] + +args = Namespace(tomlfile="docs/app.toml") +args = overwrite_args_with_toml(args) +finname = "resources/30939153.jpg" +output = "/tmp" def saveStreamFile(stream: FileStorage, fnum: str): @@ -24,95 +30,76 @@ def saveStreamURI(stream: bytes, fnum: str): handler.write(stream) -def parsePostprocess(request: LocalProxy) -> bool: - postprocess = True - # postprocess - if "postprocess" in request.form.keys(): - postprocess = bool(int(request.form.getlist("postprocess")[0])) - - if request.json and "postprocess" in request.json.keys(): - postprocess = bool(request.json["postprocess"]) - return postprocess - - -def parseColorize(request: LocalProxy) -> bool: - colorize = True - # colorize - if "colorize" in request.form.keys(): - colorize = bool(int(request.form.getlist("colorize")[0])) +@app.route("/") +def home(): + return {"message": "Hello Flask!"} - if request.json and "colorize" in request.json.keys(): - colorize = bool(request.json["colorize"]) - return colorize +@app.route("/upload", methods=["POST"]) +def dummy(): + finname = "resources/30939153.jpg" + fnum = str(random.randint(0, 10000)) + foutname = fnum + "-out.jpg" + if "file" in request.files: + saveStreamFile(request.files["file"], fnum) + finname = fnum + ".jpg" -def parseOutputDir(request: LocalProxy) -> str: - output = "/tmp" - # output path - if "output" in request.form.keys(): - output = str(request.form.getlist("output")[0]).strip() + postprocess = ( + False + if "postprocess" not in request.form.keys() + else bool(int(request.form.getlist("postprocess")[0])) + ) + colorize = ( + False + if "colorize" not in request.form.keys() + else bool(int(request.form.getlist("colorize")[0])) + ) - if request.json and "output" in request.json.keys(): - output = str(request.json["output"]) - return output + args.image = finname + args.postprocess = postprocess + args.colorize = colorize + args.save = os.path.join(output, foutname) + app.logger.info(args) + with mp.Pool() as pool: + result = pool.map(main, [args])[0] + app.logger.info(f"Output Image shape: {np.array(result).shape}") + if args.save: + mpimg.imsave(args.save, np.array(result).astype(np.uint8)) -@app.route("/") -def home(): - return {"message": "Hello Flask!"} + try: + callback = send_file( + os.path.join(output, foutname), mimetype="image/jpg" + ) + return callback, 200 + except Exception: + return {"message": "send error"}, 400 + finally: + os.system("rm " + os.path.join(output, foutname)) + if finname != "resources/30939153.jpg": + os.system("rm " + finname) + return {"message": "hello"} -@app.route("/process", methods=["POST"]) +@app.route("/uri", methods=["POST"]) def process_image(): fnum = str(random.randint(0, 10000)) finname = "resources/30939153.jpg" foutname = fnum + "-out.jpg" - output = "/tmp" - - # input image: either local file or uri - if "file" in request.files: - print("File mode...") - try: - saveStreamFile(request.files["file"], fnum) - finname = fnum + ".jpg" - print("files: ", request.files) - print(request.files["file"]) - args = Namespace( - image=finname, - weight="log/store/G", - loadmethod="log", - postprocess=True, - 
colorize=True, - tfmodel="subclass", - save=os.path.join(output, foutname), - ) - print(args) - - with mp.Pool() as pool: - result = pool.map(main, [args])[0] - - print("Output Image shape: ", np.array(result).shape) - - if args.save: - mpimg.imsave(args.save, np.array(result).astype(np.uint8)) - - try: - callback = send_file( - os.path.join(output, foutname), mimetype="image/jpg" - ) - return callback, 200 - except Exception: - return {"message": "send error"}, 400 - finally: - os.system("rm " + os.path.join(output, foutname)) - if finname != "resources/30939153.jpg": - os.system("rm " + finname) - - except Exception: - return {"message": "input error"}, 400 + postprocess = ( + bool(request.json["postprocess"]) + if request.json and "postprocess" in request.json.keys() + else False + ) + colorize = ( + bool(request.json["colorize"]) + if request.json and "colorize" in request.json.keys() + else False + ) + # input image: uri if request.json and "uri" in request.json.keys(): - print("URI mode...") + app.logger.info("URI mode...") uri = request.json["uri"] try: data = requests.get(uri).content @@ -121,25 +108,16 @@ def process_image(): except Exception: return {"message": "input error"}, 400 - postprocess = parsePostprocess(request) - colorize = parseColorize(request) - output = parseOutputDir(request) - - args = Namespace( - image=finname, - weight="log/store/G", - loadmethod="log", - postprocess=postprocess, - colorize=colorize, - tfmodel="subclass", - save=os.path.join(output, foutname), - ) - print(args) + args.image = finname + args.postprocess = postprocess + args.colorize = colorize + args.save = os.path.join(output, foutname) + app.logger.info(args) with mp.Pool() as pool: result = pool.map(main, [args])[0] - print("Output Image shape: ", np.array(result).shape) + app.logger.info(f"Output Image shape: {np.array(result).shape}") if args.save: mpimg.imsave(args.save, np.array(result).astype(np.uint8)) diff --git a/src/dfp/convert2tflite.py b/src/dfp/convert2tflite.py index ea505a0..ae7d410 100644 --- a/src/dfp/convert2tflite.py +++ b/src/dfp/convert2tflite.py @@ -9,8 +9,10 @@ from tqdm import tqdm from .data import decodeAllRaw, loadDataset, preprocess +from .net import deepfloorplanModel from .net_func import deepfloorplanFunc from .train import train_step +from .utils.settings import overwrite_args_with_toml from .utils.util import ( print_model_weight_clusters, print_model_weights_sparsity, @@ -19,7 +21,12 @@ def model_init(config: argparse.Namespace) -> tf.keras.Model: if config.loadmethod == "log": - base_model = deepfloorplanFunc() + if config.tfmodel == "subclass": + base_model = deepfloorplanModel(config=config) + base_model.build((1, 512, 512, 3)) + assert True, "subclass and log are not convertible to tflite." 
+ elif config.tfmodel == "func": + base_model = deepfloorplanFunc(config=config) base_model.load_weights(config.modeldir) elif config.loadmethod == "pb": base_model = tf.keras.models.load_model(config.modeldir) @@ -64,8 +71,35 @@ def parse_args(args: List[str]) -> argparse.Namespace: "--loadmethod", type=str, default="log", - choices=["log", "tflite", "pb", "none"], + choices=["log", "pb", "none"], ) # log,tflite,pb + p.add_argument( + "--feature-channels", + type=int, + action="store", + default=[256, 128, 64, 32], + nargs=4, + ) + p.add_argument( + "--backbone", + type=str, + default="vgg16", + choices=["vgg16", "resnet50", "mobilenetv1", "mobilenetv2"], + ) + p.add_argument( + "--feature-names", + type=str, + action="store", + nargs=5, + default=[ + "block1_pool", + "block2_pool", + "block3_pool", + "block4_pool", + "block5_pool", + ], + ) + p.add_argument("--tomlfile", type=str, default=None) return p.parse_args(args) @@ -216,11 +250,13 @@ def apply_quantization_to_conv2D(layer): if __name__ == "__main__": args = parse_args(sys.argv[1:]) + args = overwrite_args_with_toml(args) + print(args) if args.compress_mode == "quantization" or args.quantize: # quantization_aware_training(args) converter(args) - if args.tfmodel == "func": + elif args.tfmodel == "func": if args.compress_mode == "prune": prune(args) elif args.compress_mode == "cluster": diff --git a/src/dfp/deploy.py b/src/dfp/deploy.py index 0a917b6..dada9b6 100644 --- a/src/dfp/deploy.py +++ b/src/dfp/deploy.py @@ -17,6 +17,7 @@ floorplan_fuse_map, ind2rgb, ) +from .utils.settings import overwrite_args_with_toml from .utils.util import fill_break_line, flood_fill, refine_room_region os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" @@ -26,9 +27,9 @@ def init( config: argparse.Namespace, ) -> Tuple[tf.keras.Model, tf.Tensor, np.ndarray]: if config.tfmodel == "subclass": - model = deepfloorplanModel() + model = deepfloorplanModel(config=config) elif config.tfmodel == "func": - model = deepfloorplanFunc() + model = deepfloorplanFunc(config=config) if config.loadmethod == "log": model.load_weights(config.weight) elif config.loadmethod == "pb": @@ -36,12 +37,14 @@ def init( elif config.loadmethod == "tflite": model = tf.lite.Interpreter(model_path=config.weight) model.allocate_tensors() - img = mpimg.imread(config.image) + img = mpimg.imread(config.image)[:, :, :3] shp = img.shape img = tf.convert_to_tensor(img, dtype=tf.uint8) img = tf.image.resize(img, [512, 512]) img = tf.cast(img, dtype=tf.float32) - img = tf.reshape(img, [-1, 512, 512, 3]) / 255 + img = tf.reshape(img, [-1, 512, 512, 3]) + if tf.math.reduce_max(img) > 1.0: + img /= 255 if config.loadmethod == "tflite": return model, img, shp model.trainable = False @@ -196,6 +199,33 @@ def parse_args(args: List[str]) -> argparse.Namespace: choices=["log", "tflite", "pb", "none"], ) # log,tflite,pb p.add_argument("--save", type=str) + p.add_argument( + "--feature-channels", + type=int, + action="store", + default=[256, 128, 64, 32], + nargs=4, + ) + p.add_argument( + "--backbone", + type=str, + default="vgg16", + choices=["vgg16", "resnet50", "mobilenetv1", "mobilenetv2"], + ) + p.add_argument( + "--feature-names", + type=str, + action="store", + nargs=5, + default=[ + "block1_pool", + "block2_pool", + "block3_pool", + "block4_pool", + "block5_pool", + ], + ) + p.add_argument("--tomlfile", type=str, default=None) return p.parse_args(args) @@ -209,6 +239,7 @@ def deploy_plot_res(result: np.ndarray): if __name__ == "__main__": args = parse_args(sys.argv[1:]) + args = 
overwrite_args_with_toml(args) result = main(args) deploy_plot_res(result) plt.show() diff --git a/src/dfp/net.py b/src/dfp/net.py index 2a58a13..38b183c 100644 --- a/src/dfp/net.py +++ b/src/dfp/net.py @@ -68,10 +68,23 @@ def up_bilinear(dim: int) -> tf.keras.Sequential: class deepfloorplanModel(Model): def __init__(self, config: argparse.Namespace = None): super(deepfloorplanModel, self).__init__() + self.config = config + dimlist = [256, 128, 64, 32] + self.feature_names = [ + "block1_pool", + "block2_pool", + "block3_pool", + "block4_pool", + "block5_pool", + ] + if config is not None: + dimlist = config.feature_channels + assert ( + config.backbone == "vgg16" + ), "subclass backbone must be vgg16" + self.feature_names = config.feature_names self._vgg16init() # room boundary prediction (rbp) - # dimlist = [256, 128, 64, 32] - dimlist = [32, 32, 32, 32] self.rbpups = [upconv2d(dim=d, act="linear") for d in dimlist] self.rbpcv1 = [conv2d(dim=d, act="linear") for d in dimlist] self.rbpcv2 = [conv2d(dim=d) for d in dimlist] @@ -220,7 +233,7 @@ def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: feature = x for layer in self.vgg16.layers: feature = layer(feature) - if layer.name.find("pool") != -1: + if layer.name in self.feature_names: features.append(feature) x = feature features = features[::-1] diff --git a/src/dfp/net_func.py b/src/dfp/net_func.py index 5f28863..289dcf8 100644 --- a/src/dfp/net_func.py +++ b/src/dfp/net_func.py @@ -1,3 +1,4 @@ +import argparse from typing import Tuple import numpy as np @@ -18,84 +19,64 @@ from tensorflow.keras.models import Model -def resnet50_backbone(x): - layer_names = [ - "conv1_relu", # 256x256x64 - "conv2_block3_out", # 128x128x256 - "conv3_block4_out", # 64x64x512 - "conv4_block6_out", # 32x32x1024 - "conv5_block3_out", # 16x16x2048 - ] +def resnet50_backbone(x, feature_names): backbone = ResNet50(weights="imagenet", include_top=False, input_tensor=x) backbone = Model( - inputs=x, outputs=backbone.get_layer(layer_names[-1]).output + inputs=x, outputs=backbone.get_layer(feature_names[-1]).output ) for layer in backbone.layers: layer.trainable = False features = [] - for layer in backbone.layers: - if layer.name in layer_names: - features.append(backbone.get_layer(layer.name).output) + for layer in feature_names: + features.append(backbone.get_layer(layer).output) features = features[::-1] return features -def mobilenet_backbone(x): - layer_names = [ - "conv_pw_1_relu", # 256x256x64 - "conv_pw_3_relu", # 128x128x128 - "conv_pw_5_relu", # 64x64x256 - "conv_pw_7_relu", # 32x32x512 - # "conv_pw_13_relu", # 16x16x1024 - "conv_pw_12_relu", # 16x16x1024 - ] +def mobilenet_backbone(x, feature_names): backbone = MobileNet(weights="imagenet", include_top=False, input_tensor=x) backbone = Model( - inputs=x, outputs=backbone.get_layer(layer_names[-1]).output + inputs=x, outputs=backbone.get_layer(feature_names[-1]).output ) for layer in backbone.layers: layer.trainable = False features = [] - for layer in backbone.layers: - if layer.name in layer_names: - features.append(backbone.get_layer(layer.name).output) + for layer in feature_names: + features.append(backbone.get_layer(layer).output) features = features[::-1] return features -def mobilenetv2_backbone(x): - layer_names = [ - "block_1_expand_relu", # 256x256x96 - "block_3_expand_relu", # 128x128x144 - "block_5_expand_relu", # 64x64x192 - "block_13_expand_relu", # 32x32x576 - "out_relu", # 16x16x1280 - ] +def mobilenetv2_backbone(x, feature_names): backbone = MobileNetV2( weights="imagenet", 
include_top=False, input_tensor=x ) + backbone = Model( + inputs=x, outputs=backbone.get_layer(feature_names[-1]).output + ) for layer in backbone.layers: layer.trainable = False features = [] - for layer in backbone.layers: - if layer.name in layer_names: - features.append(backbone.get_layer(layer.name).output) + for layer in feature_names: + features.append(backbone.get_layer(layer).output) features = features[::-1] return features -def vgg16_backbone(x): +def vgg16_backbone(x, feature_names): backbone = VGG16(weights="imagenet", include_top=False, input_tensor=x) + backbone = Model( + inputs=x, outputs=backbone.get_layer(feature_names[-1]).output + ) for layer in backbone.layers: layer.trainable = False features = [] - for layer in backbone.layers: - if layer.name.find("pool") != -1: - features.append(backbone.get_layer(layer.name).output) + for layer in feature_names: + features.append(backbone.get_layer(layer).output) features = features[::-1] return features @@ -175,14 +156,33 @@ def attention(xf, x_, rbdim): return non_local_context(xf, x_, rbdim) -def deepfloorplanFunc(): +def deepfloorplanFunc(config: argparse.Namespace = None): inp = Input([512, 512, 3]) - features = vgg16_backbone(inp) - # features = resnet50_backbone(inp) + if config is None: + rbdims = [256, 128, 64, 32] + features = vgg16_backbone( + inp, + [ + "block1_pool", + "block2_pool", + "block3_pool", + "block4_pool", + "block5_pool", + ], + ) + elif config is not None: + rbdims = config.feature_channels + if config.backbone == "resnet50": + features = resnet50_backbone(inp, config.feature_names) + elif config.backbone == "vgg16": + features = vgg16_backbone(inp, config.feature_names) + elif config.backbone == "mobilenetv1": + features = mobilenet_backbone(inp, config.feature_names) + elif config.backbone == "mobilenetv2": + features = mobilenetv2_backbone(inp, config.feature_names) + assert len(features) == 5, "Not enough 5 features..." 
features_room_boundary = [] - # rbdims = [256, 128, 64, 32] - rbdims = [128, 64, 32, 16] x = features[0] for i in range(len(rbdims)): x = Conv2DTranspose(rbdims[i], 4, strides=2, padding="same")(x) diff --git a/src/dfp/train.py b/src/dfp/train.py index aaf0653..7b71288 100644 --- a/src/dfp/train.py +++ b/src/dfp/train.py @@ -18,6 +18,7 @@ from .loss import balanced_entropy, cross_two_tasks_weight from .net import deepfloorplanModel from .net_func import deepfloorplanFunc +from .utils.settings import overwrite_args_with_toml os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" @@ -27,9 +28,9 @@ def init( ) -> Tuple[tf.data.Dataset, tf.keras.Model, tf.keras.optimizers.Optimizer]: dataset = loadDataset() if config.tfmodel == "subclass": - model = deepfloorplanModel() + model = deepfloorplanModel(config=config) elif config.tfmodel == "func": - model = deepfloorplanFunc() + model = deepfloorplanFunc(config=config) os.system(f"mkdir -p {config.modeldir}") if config.weight: model.load_weights(config.weight) @@ -122,7 +123,7 @@ def main(config: argparse.Namespace): ) # plot progress - if pltiter % config.saveTensorInterval == 0: + if pltiter % config.save_tensor_interval == 0: f = image_grid(img, bound, room, logits_r, logits_cw) im = plot_to_image(f) with writer.as_default(): @@ -134,7 +135,7 @@ def main(config: argparse.Namespace): pltiter += 1 # save model - if epoch % config.saveModelInterval == 0: + if epoch % config.save_model_interval == 0: model.save_weights(config.logdir + "/G") model.save(config.modeldir) print("[INFO] Saving Model ...") @@ -152,12 +153,40 @@ def parse_args(args: List[str]) -> argparse.Namespace: p.add_argument("--logdir", type=str, default="log/store") p.add_argument("--modeldir", type=str, default="model/store") p.add_argument("--weight", type=str) - p.add_argument("--saveTensorInterval", type=int, default=10) - p.add_argument("--saveModelInterval", type=int, default=20) + p.add_argument("--save-tensor-interval", type=int, default=10) + p.add_argument("--save-model-interval", type=int, default=20) + p.add_argument("--tomlfile", type=str, default=None) + p.add_argument( + "--feature-channels", + type=int, + action="store", + default=[256, 128, 64, 32], + nargs=4, + ) + p.add_argument( + "--backbone", + type=str, + default="vgg16", + choices=["vgg16", "resnet50", "mobilenetv1", "mobilenetv2"], + ) + p.add_argument( + "--feature-names", + type=str, + action="store", + nargs=5, + default=[ + "block1_pool", + "block2_pool", + "block3_pool", + "block4_pool", + "block5_pool", + ], + ) return p.parse_args(args) if __name__ == "__main__": args = parse_args(sys.argv[1:]) + args = overwrite_args_with_toml(args) print(args) main(args) diff --git a/src/dfp/utils/settings.py b/src/dfp/utils/settings.py new file mode 100644 index 0000000..d12bcd2 --- /dev/null +++ b/src/dfp/utils/settings.py @@ -0,0 +1,14 @@ +import argparse +from argparse import Namespace + +from dynaconf import Dynaconf + + +def overwrite_args_with_toml(config: argparse.Namespace) -> argparse.Namespace: + if config.tomlfile is None: + return config + settings = Dynaconf( + envvar_prefix="DYNACONF", settings_files=[config.tomlfile] + ) + settings = dict((k.lower(), v) for k, v in settings.as_dict().items()) + return Namespace(**settings) diff --git a/tests/test_app.py b/tests/test_app.py index 093bb71..93fd0fb 100644 --- a/tests/test_app.py +++ b/tests/test_app.py @@ -10,7 +10,6 @@ from pytest_mock import MockFixture from dfp.app import app as create_app -from dfp.app import parseColorize, parseOutputDir, 
parsePostprocess class fakeMultiprocessing: @@ -88,15 +87,15 @@ def test_app_home(client: FlaskClient): assert resp.json.get("message", "Hello Flask!") -# def test_app_process_image(client: FlaskClient): -# resp = client.post("/process") -# assert resp.status_code == 400 +def test_app_process_image(client: FlaskClient): + resp = client.post("/upload") + assert resp.status_code == 200 def test_app_mock_process_empty(client: FlaskClient): headers: Dict[Any, Any] = {} data: Dict[Any, Any] = {} - resp = client.post("/process", headers=headers, json=data) + resp = client.post("/uri", headers=headers, json=data) assert resp.status_code == 200 assert resp.json.get("message", "success!") @@ -109,32 +108,14 @@ def test_app_mock_process_uri(client: FlaskClient): "colorize": 1, "output": "/tmp", } - resp = client.post("/process", headers=headers, json=data) + resp = client.post("/uri", headers=headers, json=data) os.system("rm *.jpg") assert resp.status_code == 200 assert resp.json.get("message", "success!") -# def test_app_mock_process_file(client: FlaskClient): -# files = {"file": (open("resources/30939153.jpg", "rb"), "30939153.jpg")} -# resp = client.post("/process", data=files) -# os.system("rm *.jpg") -# assert resp.status_code == 400 - - -def test_app_parsePostprocess(): - req = fakeRequest() - postprocess = parsePostprocess(req) - assert postprocess is False - - -def test_app_parseColorize(): - req = fakeRequest() - colorize = parseColorize(req) - assert colorize is False - - -def test_app_parseOutputDir(): - req = fakeRequest() - output = parseOutputDir(req) - assert output == "/tmp" +def test_app_mock_process_file(client: FlaskClient): + files = {"file": (open("resources/30939153.jpg", "rb"), "30939153.jpg")} + resp = client.post("/upload", data=files) + os.system("rm *.jpg") + assert resp.status_code == 200 diff --git a/tests/test_deploy.py b/tests/test_deploy.py index f95c7bf..ad9e7a6 100644 --- a/tests/test_deploy.py +++ b/tests/test_deploy.py @@ -127,7 +127,9 @@ def test_init_none(mocker: MockFixture): model = fakeModel() mocker.patch("dfp.deploy.deepfloorplanModel", return_value=model) mocker.patch("dfp.deploy.mpimg.imread", return_value=np.zeros([16, 16, 3])) - args = Namespace(loadmethod="none", image="", tfmodel="subclass") + args = parse_args( + '--loadmethod none --image "" --tfmodel subclass'.split() + ) model_, img, shp = init(args) assert shp == (16, 16, 3) @@ -136,8 +138,9 @@ def test_init_log(mocker: MockFixture): model = fakeModel() mocker.patch("dfp.deploy.deepfloorplanModel", return_value=model) mocker.patch("dfp.deploy.mpimg.imread", return_value=np.zeros([16, 16, 3])) - args = Namespace( - loadmethod="log", image="", weight="log/store/G", tfmodel="subclass" + args = parse_args( + """--loadmethod log --image "" +--weight log/store/G --tfmodel subclass""".split() ) model_, img, shp = init(args) assert shp == (16, 16, 3) @@ -148,8 +151,9 @@ def test_init_pb(mocker: MockFixture): mocker.patch("dfp.deploy.deepfloorplanModel", return_value=model) mocker.patch("dfp.deploy.mpimg.imread", return_value=np.zeros([16, 16, 3])) mocker.patch("dfp.deploy.tf.keras.models.load_model", return_value=model) - args = Namespace( - loadmethod="pb", image="", weight="model/store", tfmodel="subclass" + args = parse_args( + """--loadmethod pb --image "" +--weight model/store --tfmodel subclass""".split() ) model_, img, shp = init(args) assert shp == (16, 16, 3) @@ -160,11 +164,10 @@ def test_init_tflite(mocker: MockFixture): mocker.patch("dfp.deploy.deepfloorplanModel", return_value=model) 
mocker.patch("dfp.deploy.mpimg.imread", return_value=np.zeros([16, 16, 3])) mocker.patch("dfp.deploy.tf.lite.Interpreter", return_value=model) - args = Namespace( - loadmethod="tflite", - image="", - weight="model/store/model.tflite", - tfmodel="subclass", + args = parse_args( + """--loadmethod tflite --image \"\" +--weight model/store/model.tflite +--tfmodel subclass""".split() ) model_, img, shp = init(args) assert shp == (16, 16, 3)