
Format code with black
wkentaro committed Jun 13, 2020
1 parent 19433fc commit 5c98084
Showing 39 changed files with 1,297 additions and 1,059 deletions.
2 changes: 1 addition & 1 deletion .flake8
@@ -1,3 +1,3 @@
[flake8]
exclude = .anaconda3/*
ignore = W504
ignore = E203, E741, W503, W504
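
Black's output is known to conflict with flake8's E203 (whitespace before ':') and W503 (line break before binary operator) checks, which is presumably why those codes join the ignore list here; E741 (ambiguous single-character names such as l) looks like a separate relaxation for existing identifiers rather than something Black requires. A minimal illustration, not taken from this repository, of Black-style code that would otherwise trip E203 and W503:

    # Hypothetical snippet, not from this repository.
    lower, upper, offset = 2, 10, 1
    ham = list(range(20))

    # Black puts spaces around ":" when a slice bound is a compound
    # expression; flake8 flags that as E203 (whitespace before ':').
    sliced = ham[lower + offset : upper + offset]

    # For long expressions, Black breaks the line *before* binary operators
    # (shown here on a short one for brevity); flake8 flags that layout as
    # W503 (line break before binary operator).
    total = (
        lower
        + upper
        + offset
    )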
19 changes: 13 additions & 6 deletions .github/workflows/ci.yml
@@ -56,7 +56,7 @@ jobs:
env:
PYTHON_VERSION: ${{ matrix.PYTHON_VERSION }}
run: |
conda install -y python=$PYTHON_VERSION
conda install -q -y python=$PYTHON_VERSION
which python
python --version
pip --version
@@ -66,17 +66,17 @@
run: |
if [ "${{ matrix.PYTEST_QT_API }}" = "pyside2" ]; then
if [ "${{ matrix.PYTHON_VERSION }}" = "2.7" ]; then
conda install -y 'pyside2!=5.12.4' -c conda-forge
conda install -q -y 'pyside2!=5.12.4' -c conda-forge
else
conda install -y pyside2 -c conda-forge
conda install -q -y pyside2 -c conda-forge
fi
elif [ "${{ matrix.PYTEST_QT_API }}" = "pyqt4v2" ]; then
conda install -y pyqt=4 -c conda-forge
conda install -q -y pyqt=4 -c conda-forge
else # pyqt5
conda install -y pyqt=5
conda install -q -y pyqt=5
fi
if [ "${{ matrix.os }}" != "windows-latest" ]; then
conda install -y help2man
conda install -q -y help2man
fi
pip install hacking pytest pytest-qt
@@ -91,6 +91,13 @@ jobs:
run: |
flake8 .
- name: Black
shell: bash -l {0}
if: matrix.os != 'windows-latest' && matrix.python-version != '2.7'
run: |
pip install black
black --check .
- name: Test with pytest
shell: bash -l {0}
if: matrix.os != 'windows-latest'
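
The new Black step runs only on non-Windows, non-Python-2.7 matrix entries; black --check . exits non-zero when any file would be reformatted, without modifying anything. A minimal sketch, not part of the repository, of reproducing the two lint steps locally:

    # Minimal local equivalent of the CI's flake8 and Black steps
    # (assumes both tools are installed, e.g. pip install flake8 black).
    import subprocess
    import sys

    for cmd in (["flake8", "."], ["black", "--check", "."]):
        result = subprocess.run(cmd)
        if result.returncode != 0:
            # black --check returns 1 when files would be reformatted.
            sys.exit(result.returncode)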
72 changes: 37 additions & 35 deletions examples/bbox_detection/labelme2voc.py
@@ -10,35 +10,36 @@

import imgviz
import labelme

try:
import lxml.builder
import lxml.etree
except ImportError:
print('Please install lxml:\n\n pip install lxml\n')
print("Please install lxml:\n\n pip install lxml\n")
sys.exit(1)


def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('input_dir', help='input annotated directory')
parser.add_argument('output_dir', help='output dataset directory')
parser.add_argument('--labels', help='labels file', required=True)
parser.add_argument("input_dir", help="input annotated directory")
parser.add_argument("output_dir", help="output dataset directory")
parser.add_argument("--labels", help="labels file", required=True)
parser.add_argument(
'--noviz', help='no visualization', action='store_true'
"--noviz", help="no visualization", action="store_true"
)
args = parser.parse_args()

if osp.exists(args.output_dir):
print('Output directory already exists:', args.output_dir)
print("Output directory already exists:", args.output_dir)
sys.exit(1)
os.makedirs(args.output_dir)
os.makedirs(osp.join(args.output_dir, 'JPEGImages'))
os.makedirs(osp.join(args.output_dir, 'Annotations'))
os.makedirs(osp.join(args.output_dir, "JPEGImages"))
os.makedirs(osp.join(args.output_dir, "Annotations"))
if not args.noviz:
os.makedirs(osp.join(args.output_dir, 'AnnotationsVisualization'))
print('Creating dataset:', args.output_dir)
os.makedirs(osp.join(args.output_dir, "AnnotationsVisualization"))
print("Creating dataset:", args.output_dir)

class_names = []
class_name_to_id = {}
@@ -47,42 +48,41 @@ def main():
class_name = line.strip()
class_name_to_id[class_name] = class_id
if class_id == -1:
assert class_name == '__ignore__'
assert class_name == "__ignore__"
continue
elif class_id == 0:
assert class_name == '_background_'
assert class_name == "_background_"
class_names.append(class_name)
class_names = tuple(class_names)
print('class_names:', class_names)
out_class_names_file = osp.join(args.output_dir, 'class_names.txt')
with open(out_class_names_file, 'w') as f:
f.writelines('\n'.join(class_names))
print('Saved class_names:', out_class_names_file)
print("class_names:", class_names)
out_class_names_file = osp.join(args.output_dir, "class_names.txt")
with open(out_class_names_file, "w") as f:
f.writelines("\n".join(class_names))
print("Saved class_names:", out_class_names_file)

for filename in glob.glob(osp.join(args.input_dir, '*.json')):
print('Generating dataset from:', filename)
for filename in glob.glob(osp.join(args.input_dir, "*.json")):
print("Generating dataset from:", filename)

label_file = labelme.LabelFile(filename=filename)

base = osp.splitext(osp.basename(filename))[0]
out_img_file = osp.join(
args.output_dir, 'JPEGImages', base + '.jpg')
out_xml_file = osp.join(
args.output_dir, 'Annotations', base + '.xml')
out_img_file = osp.join(args.output_dir, "JPEGImages", base + ".jpg")
out_xml_file = osp.join(args.output_dir, "Annotations", base + ".xml")
if not args.noviz:
out_viz_file = osp.join(
args.output_dir, 'AnnotationsVisualization', base + '.jpg')
args.output_dir, "AnnotationsVisualization", base + ".jpg"
)

img = labelme.utils.img_data_to_arr(label_file.imageData)
imgviz.io.imsave(out_img_file, img)

maker = lxml.builder.ElementMaker()
xml = maker.annotation(
maker.folder(),
maker.filename(base + '.jpg'),
maker.database(), # e.g., The VOC2007 Database
maker.filename(base + ".jpg"),
maker.database(), # e.g., The VOC2007 Database
maker.annotation(), # e.g., Pascal VOC2007
maker.image(), # e.g., flickr
maker.image(), # e.g., flickr
maker.size(
maker.height(str(img.shape[0])),
maker.width(str(img.shape[1])),
@@ -94,15 +94,17 @@ def main():
bboxes = []
labels = []
for shape in label_file.shapes:
if shape['shape_type'] != 'rectangle':
print('Skipping shape: label={label}, shape_type={shape_type}'
.format(**shape))
if shape["shape_type"] != "rectangle":
print(
"Skipping shape: label={label}, "
"shape_type={shape_type}".format(**shape)
)
continue

class_name = shape['label']
class_name = shape["label"]
class_id = class_names.index(class_name)

(xmin, ymin), (xmax, ymax) = shape['points']
(xmin, ymin), (xmax, ymax) = shape["points"]
# swap if min is larger than max.
xmin, xmax = sorted([xmin, xmax])
ymin, ymax = sorted([ymin, ymax])
@@ -112,7 +114,7 @@

xml.append(
maker.object(
maker.name(shape['label']),
maker.name(shape["label"]),
maker.pose(),
maker.truncated(),
maker.difficult(),
@@ -136,9 +138,9 @@
)
imgviz.io.imsave(out_viz_file, viz)

with open(out_xml_file, 'wb') as f:
with open(out_xml_file, "wb") as f:
f.write(lxml.etree.tostring(xml, pretty_print=True))


if __name__ == '__main__':
if __name__ == "__main__":
main()
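
The reformatting does not change behavior: the script still reads every *.json labelme file in input_dir, writes JPEG images and Pascal VOC XML annotations into a fresh output_dir (plus visualization images unless --noviz is given), and exits if output_dir already exists. A usage sketch with hypothetical paths:

    # Usage sketch; data_annotated/, data_dataset_voc/ and labels.txt are
    # hypothetical example paths. labels.txt lists one class name per line,
    # starting with __ignore__ and _background_ as the script asserts.
    import subprocess

    subprocess.run(
        [
            "python",
            "examples/bbox_detection/labelme2voc.py",
            "data_annotated",    # directory containing labelme *.json files
            "data_dataset_voc",  # output directory; must not exist yet
            "--labels",
            "labels.txt",
        ],
        check=True,
    )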
106 changes: 51 additions & 55 deletions examples/instance_segmentation/labelme2coco.py
@@ -18,25 +18,25 @@
try:
import pycocotools.mask
except ImportError:
print('Please install pycocotools:\n\n pip install pycocotools\n')
print("Please install pycocotools:\n\n pip install pycocotools\n")
sys.exit(1)


def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('input_dir', help='input annotated directory')
parser.add_argument('output_dir', help='output dataset directory')
parser.add_argument('--labels', help='labels file', required=True)
parser.add_argument("input_dir", help="input annotated directory")
parser.add_argument("output_dir", help="output dataset directory")
parser.add_argument("--labels", help="labels file", required=True)
args = parser.parse_args()

if osp.exists(args.output_dir):
print('Output directory already exists:', args.output_dir)
print("Output directory already exists:", args.output_dir)
sys.exit(1)
os.makedirs(args.output_dir)
os.makedirs(osp.join(args.output_dir, 'JPEGImages'))
print('Creating dataset:', args.output_dir)
os.makedirs(osp.join(args.output_dir, "JPEGImages"))
print("Creating dataset:", args.output_dir)

now = datetime.datetime.now()

@@ -47,17 +47,13 @@ def main():
version=None,
year=now.year,
contributor=None,
date_created=now.strftime('%Y-%m-%d %H:%M:%S.%f'),
date_created=now.strftime("%Y-%m-%d %H:%M:%S.%f"),
),
licenses=[dict(
url=None,
id=0,
name=None,
)],
licenses=[dict(url=None, id=0, name=None,)],
images=[
# license, url, file_name, height, width, date_captured, id
],
type='instances',
type="instances",
annotations=[
# segmentation, area, iscrowd, image_id, bbox, category_id, id
],
@@ -71,46 +67,44 @@ def main():
class_id = i - 1 # starts with -1
class_name = line.strip()
if class_id == -1:
assert class_name == '__ignore__'
assert class_name == "__ignore__"
continue
class_name_to_id[class_name] = class_id
data['categories'].append(dict(
supercategory=None,
id=class_id,
name=class_name,
))

out_ann_file = osp.join(args.output_dir, 'annotations.json')
label_files = glob.glob(osp.join(args.input_dir, '*.json'))
data["categories"].append(
dict(supercategory=None, id=class_id, name=class_name,)
)

out_ann_file = osp.join(args.output_dir, "annotations.json")
label_files = glob.glob(osp.join(args.input_dir, "*.json"))
for image_id, filename in enumerate(label_files):
print('Generating dataset from:', filename)
print("Generating dataset from:", filename)

label_file = labelme.LabelFile(filename=filename)

base = osp.splitext(osp.basename(filename))[0]
out_img_file = osp.join(
args.output_dir, 'JPEGImages', base + '.jpg'
)
out_img_file = osp.join(args.output_dir, "JPEGImages", base + ".jpg")

img = labelme.utils.img_data_to_arr(label_file.imageData)
PIL.Image.fromarray(img).convert("RGB").save(out_img_file)
data['images'].append(dict(
license=0,
url=None,
file_name=osp.relpath(out_img_file, osp.dirname(out_ann_file)),
height=img.shape[0],
width=img.shape[1],
date_captured=None,
id=image_id,
))

masks = {} # for area
data["images"].append(
dict(
license=0,
url=None,
file_name=osp.relpath(out_img_file, osp.dirname(out_ann_file)),
height=img.shape[0],
width=img.shape[1],
date_captured=None,
id=image_id,
)
)

masks = {} # for area
segmentations = collections.defaultdict(list) # for segmentation
for shape in label_file.shapes:
points = shape['points']
label = shape['label']
group_id = shape.get('group_id')
shape_type = shape.get('shape_type', 'polygon')
points = shape["points"]
label = shape["label"]
group_id = shape.get("group_id")
shape_type = shape.get("shape_type", "polygon")
mask = labelme.utils.shape_to_mask(
img.shape[:2], points, shape_type
)
@@ -125,7 +119,7 @@ def main():
else:
masks[instance] = mask

if shape_type == 'rectangle':
if shape_type == "rectangle":
(x1, y1), (x2, y2) = points
x1, x2 = sorted([x1, x2])
y1, y2 = sorted([y1, y2])
@@ -147,19 +141,21 @@ def main():
area = float(pycocotools.mask.area(mask))
bbox = pycocotools.mask.toBbox(mask).flatten().tolist()

data['annotations'].append(dict(
id=len(data['annotations']),
image_id=image_id,
category_id=cls_id,
segmentation=segmentations[instance],
area=area,
bbox=bbox,
iscrowd=0,
))

with open(out_ann_file, 'w') as f:
data["annotations"].append(
dict(
id=len(data["annotations"]),
image_id=image_id,
category_id=cls_id,
segmentation=segmentations[instance],
area=area,
bbox=bbox,
iscrowd=0,
)
)

with open(out_ann_file, "w") as f:
json.dump(data, f)


if __name__ == '__main__':
if __name__ == "__main__":
main()
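
Invocation mirrors labelme2voc.py (input_dir, output_dir, --labels labels.txt), with pycocotools as an extra dependency; the result is a JPEGImages/ folder plus a single COCO-style annotations.json. A small sketch, assuming a hypothetical output path, of inspecting what the script writes:

    # Inspect the COCO file produced by labelme2coco.py; the path
    # data_dataset_coco/ is a hypothetical example.
    import json

    with open("data_dataset_coco/annotations.json") as f:
        coco = json.load(f)

    # Top-level keys built by the script: info, licenses, images,
    # type ("instances"), annotations, categories.
    print(sorted(coco))
    print(len(coco["images"]), "images,", len(coco["annotations"]), "annotations")
    for category in coco["categories"]:
        print(category["id"], category["name"])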
