And the rest of the changes
ferrouswheel committed Nov 13, 2018
1 parent 1fc82eb commit af84eb9
Showing 9 changed files with 271 additions and 261 deletions.
6 changes: 6 additions & 0 deletions .gitignore
@@ -106,3 +106,9 @@ venv.bak/
.idea/
models/
snet.config
config/
mask_*.png
.vscode/

services/service_spec/segmentation_pb2.py
services/service_spec/segmentation_pb2_grpc.py
3 changes: 1 addition & 2 deletions Dockerfile
@@ -11,10 +11,9 @@ RUN pip3.6 install -r mask_rcnn_requirements.txt

ADD requirements.txt /service_requirements.txt
RUN pip3.6 install -r service_requirements.txt
RUN apt-get install -y git && git clone https://github.com/waleedka/coco.git
RUN cd coco/PythonAPI && python3.6 setup.py build_ext install

ADD . /semantic-segmentation
WORKDIR /semantic-segmentation
RUN ./build_proto.sh

CMD ["python3.6", "run_service.py"]
22 changes: 17 additions & 5 deletions README.md
@@ -4,9 +4,21 @@ This repository contains a SingularityNET service to do semantic segmentation on

Currently it just supports the Mask_RCNN approach, pulling in Matterport's Mask_RCNN implementation.

## TODO
## Setup

- [ ] Support other semantic segmentation algorithms/models.
- [ ] Publish information about the semantic classes each service knows about using
the metadata URI in the SingularityNET Agent contract.
- [ ] grpc model
Requires Python 3.6 and NodeJS/npm

```
pip install -r requirements.txt
git clone git@github.com:singnet/snet-cli.git
cd snet-cli
./scripts/blockchain install
pip install -e .
```

## Issues

If you get an ImportError ending with `cytoolz/functoolz.cpython-36m-x86_64-linux-gnu.so: undefined symbol: PyFPE_jbuf`,
this [can be solved](https://github.com/pytoolz/cytoolz/issues/120) with:

`pip install --no-cache-dir cytoolz`
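
With the setup above in place, the service speaks gRPC using the stubs generated by `build_proto.sh` (`services/service_spec/segmentation_pb2.py` and `segmentation_pb2_grpc.py`, presumably why those files are now git-ignored). The sketch below is a minimal client, assuming a `SemanticSegmentation` service with a `segment` method that takes a base64-encoded image plus a `visualize` flag, and a local endpoint on port 6206 (the port in `service.json`); the real service, method, and field names live in the `.proto` file, which is not part of this commit, so treat these identifiers as placeholders.

```python
# Hypothetical gRPC client sketch -- service/method/field names are assumptions;
# check services/service_spec/segmentation.proto for the real definitions.
import base64

import grpc

from services.service_spec import segmentation_pb2, segmentation_pb2_grpc


def segment_file(path, endpoint="localhost:6206"):
    # Read the image and base64-encode it, matching how the service returns masks.
    with open(path, "rb") as f:
        img_b64 = base64.b64encode(f.read())

    channel = grpc.insecure_channel(endpoint)
    stub = segmentation_pb2_grpc.SemanticSegmentationStub(channel)  # assumed stub name
    request = segmentation_pb2.Request(img=img_b64, visualize=True)  # assumed message/fields
    return stub.segment(request)  # assumed method name


if __name__ == "__main__":
    print(segment_file("test_image.jpg"))
```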
54 changes: 0 additions & 54 deletions jsonrpc_client.py

This file was deleted.

6 changes: 2 additions & 4 deletions requirements.txt
@@ -1,6 +1,4 @@
pyyaml
aiohttp
jsonrpcserver
tqdm
snetd-alpha
tensorflow-gpu
tensorflow-gpu
scikit-image
7 changes: 6 additions & 1 deletion service.json
@@ -1,12 +1,17 @@
{
    "name": "semantic-segmentation",
    "service_spec": "service_spec/",
    "service_spec": "services/service_spec/",
    "organization": "SNET",
    "path": "",
    "price": 10000000,
    "endpoint": "http://34.216.72.29:6206",
    "tags": [],
    "metadata": {
        "description": ""
    },
    "networks": {
        "42": {
            "agentAddress": "0xe8A3c76D1cdb6f6a3f53D6d32cF00F88CBcca454"
        }
    }
}
173 changes: 139 additions & 34 deletions services/common.py
@@ -1,47 +1,152 @@
import asyncio
import argparse
import os.path
import time
import logging
import io
import os
import sys
import skimage
import warnings
import base64

from aiohttp import web
import skimage.io
from skimage import img_as_uint
import matplotlib.pyplot as plt
import PIL, PIL.Image

from services import registry
from multiprocessing import Pool

logging.basicConfig(level=10, format="%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s")
log = logging.getLogger(__package__ + "." + __name__)

def common_parser(script_name):
    parser = argparse.ArgumentParser(prog=script_name)
    server_name = os.path.splitext(os.path.basename(script_name))[0]
    parser.add_argument("--grpc-port", help="port to bind grpc services to", default=registry[server_name]['grpc'], type=int, required=False)
    parser.add_argument("--json-rpc-port", help="port to bind jsonrpc services to", default=registry[server_name]['jsonrpc'], type=int,
                        required=False)
    return parser
# COCO Class names
# Index of the class in the list is its ID.
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']


async def _start_json_rpc(runner, host, port):
    await runner.setup()
    site = web.TCPSite(runner, str(host), port)
    await site.start()
ROOT_DIR = os.path.join(os.path.dirname(__file__), "..")

    while True:
        await asyncio.sleep(1)
def load_model():
    import mrcnn.model as modellib
    from mrcnn.config import Config

    class InferenceConfig(Config):
        """Configuration for training on MS COCO.
        Derives from the base Config class and overrides values specific
        to the COCO dataset.
        """
        # Give the configuration a recognizable name
        NAME = "coco"

def main_loop(grpc_serve_function, grpc_args, jsonrpc_handler, args):
    server = None
    if grpc_serve_function is not None:
        server = grpc_serve_function(port=args.grpc_port, **grpc_args)
        server.start()
        # We use a GPU with 12GB memory, which can fit two images.
        # Adjust down if you use a smaller GPU.
        #IMAGES_PER_GPU = 2
        # Set batch size to 1 since we'll be running inference on
        # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
        IMAGES_PER_GPU = 1

    loop = asyncio.get_event_loop()
        # Uncomment to train on 8 GPUs (default is 1)
        # GPU_COUNT = 8
        GPU_COUNT = 1

    app = web.Application(loop=loop)
    app.router.add_post('/', jsonrpc_handler)
    runner = web.AppRunner(app)
        # Number of classes (including background)
        NUM_CLASSES = 1 + 80 # COCO has 80 classes

    try:
        loop.run_until_complete(_start_json_rpc(runner, host="127.0.0.1", port=args.json_rpc_port))
    except KeyboardInterrupt:
        if server is not None:
            server.stop(0)
        loop.run_until_complete(runner.cleanup())
    config = InferenceConfig()

    loop.close()
    # Directory to save logs and trained model
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    # Local path to trained weights file
    COCO_MODEL_PATH = os.path.join(ROOT_DIR, "models", "mask_rcnn_coco.h5")
    # Create model object in inference mode.
    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

    # Load weights trained on MS-COCO
    model.load_weights(COCO_MODEL_PATH, by_name=True)
    log.info("Mask_RCNN weights loaded and model initialised")
    return model

def init():
    sys.path.append(os.path.join(ROOT_DIR, "mask_rcnn"))  # To find local version of the library
    # Import COCO config
    sys.path.append(os.path.join(ROOT_DIR, "mask_rcnn/samples/coco/"))  # To find local version

    #config.display()


def fig2png_buffer(fig):
log.info("fig2png")
fig.canvas.draw()

buffer = io.BytesIO()

pilImage = PIL.Image.frombytes("RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
pilImage.save(buffer, "PNG")
return buffer


def segment_image(img, visualize=False):
    import tensorflow as tf
    from keras import backend as K
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)
    K.set_session(sess)

    init()

    model = load_model()

    # Run detection
    results = model.detect([img], verbose=1)
    r = results[0]

    # We need to copy/serialise some of this data otherwise
    # it will be lost when this spawned process finishes
    if visualize:
        log.info("visualize requested")
        from mrcnn import visualize
        # Visualize results
        fig, ax = plt.subplots(1, figsize=plt.figaspect(img))
        ax.set_axis_off()
        fig.subplots_adjust(0, 0, 1, 1)

        visualize.display_instances(img, r['rois'], r['masks'], r['class_ids'],
                                    class_names, r['scores'], ax=ax)
        viz_img_buff = fig2png_buffer(fig)

        r["resultImage"] = base64.b64encode(viz_img_buff.getvalue())

    r['rois'] = r['rois'].tolist()
    r['class_ids'] = r['class_ids'].tolist()
    r['class_names'] = [class_names[i] for i in r['class_ids']]
    r['known_classes'] = class_names
    r['scores'] = r['scores'].tolist()
    masks = r['masks']
    r['masks'] = []
    for i in range(masks.shape[2]):
        # convert mask arrays into gray-scale pngs, then base64 encode them
        buff = io.BytesIO()
        # skimage annoyingly spams warnings when the mask is below a certain pixel area
        # proportional to the image size.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            skimage.io.imsave(buff, img_as_uint(masks[:, :, i]))
        b64img = base64.b64encode(buff.getvalue())
        r['masks'].append(b64img)

    del model
    sess.close()
    return r
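
The comment in `segment_image` about data being "lost when this spawned process finishes" suggests it is meant to run in a worker process (hence the `Pool` import): the TensorFlow session and GPU memory are released when the worker exits, and only the serialisable result dict comes back to the parent. A minimal sketch of that pattern, including caller-side decoding of one base64 mask, might look like the following; `run_segmentation` is a hypothetical helper, since the actual calling code in `run_service.py` is not part of this commit.

```python
# Illustrative only: run segment_image in a one-shot worker process and decode a mask.
import base64
import io
from multiprocessing import Pool

import PIL.Image
import skimage.io

from services.common import segment_image


def run_segmentation(image_path, visualize=False):
    img = skimage.io.imread(image_path)
    # A fresh single-process pool per request keeps TF/Keras/GPU state out of the parent.
    with Pool(1) as pool:
        return pool.apply(segment_image, (img, visualize))


if __name__ == "__main__":
    result = run_segmentation("test_image.jpg")
    print(result['class_names'], result['scores'])

    # Each mask comes back as a base64-encoded grayscale PNG.
    first_mask = PIL.Image.open(io.BytesIO(base64.b64decode(result['masks'][0])))
    print(first_mask.size, first_mask.mode)
```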
