17 changes: 12 additions & 5 deletions .github/workflows/ci.yml
@@ -6,9 +6,7 @@ name: CI
# events but only for the master branch
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
@@ -34,16 +32,25 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install megengine -f https://megengine.org.cn/whl/mge.html

# Runs a set of commands using the runners shell
- name: Pylint check
- name: Format check
run: |
export PYTHONPATH=$PWD:$PYTHONPATH

CHECK_DIR=official/vision/
pip install pylint==2.5.2
pylint official/vision --rcfile=.pylintrc || pylint_ret=$?
pylint $CHECK_DIR --rcfile=.pylintrc || pylint_ret=$?
echo test, and deploy your project.
if [ "$pylint_ret" ]; then
exit $pylint_ret
fi
echo "All lint steps passed!"

pip3 install flake8==3.7.9
flake8 official
echo "All flake check passed!"

pip3 install isort==4.3.21
isort --check-only -rc official
echo "All isort check passed!"
29 changes: 28 additions & 1 deletion .gitignore
@@ -1,4 +1,31 @@
__pycache__/
*log*/
*.jpg
*.png
*.txt

# compilation and distribution
__pycache__
_ext
*.pyc
*.so
build/
dist/
wheels/

# pytorch/python/numpy formats
*.pth
*.pkl
*.npy

# ipython/jupyter notebooks
*.ipynb
**/.ipynb_checkpoints/

# Editor temporaries
*.swn
*.swo
*.swp
*~

# Pycharm editor settings
.idea
3 changes: 2 additions & 1 deletion official/vision/classification/resnet/inference.py
@@ -9,7 +9,8 @@
import argparse
import json

import model as resnet_model # pylint-disable=import-error
# pylint: disable=import-error
import model as resnet_model

import cv2
import numpy as np
11 changes: 7 additions & 4 deletions official/vision/classification/resnet/model.py
@@ -229,8 +229,9 @@ def __init__(
M.init.uniform_(m.bias, -bound, bound)

# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
# so that the residual branch starts with zeros, and each residual block
# behaves like an identity. According to https://arxiv.org/abs/1706.02677
# This improves the model by 0.2~0.3%.
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
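A note on the comment reflowed above: once the last BatchNorm scale of a residual branch is zero-initialized, the branch outputs zeros and the whole block is an identity mapping at the start of training. A self-contained MegEngine sketch of that effect, with a single BN standing in for the full conv-BN branch:

    import numpy as np
    import megengine as mge
    import megengine.module as M

    bn = M.BatchNorm2d(8)
    M.init.zeros_(bn.weight)    # gamma = 0; beta is already 0 by default

    x = mge.Tensor(np.random.randn(2, 8, 4, 4).astype("float32"))
    out = x + bn(x)             # residual add: the branch contributes nothing
    assert np.allclose(out.numpy(), x.numpy())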
@@ -356,7 +357,8 @@ def resnet152(**kwargs):
)
def resnext50_32x4d(**kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
`"Aggregated Residual Transformation for Deep Neural Networks"
<https://arxiv.org/pdf/1611.05431.pdf>`_

Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
@@ -372,7 +374,8 @@ def resnext50_32x4d(**kwargs):
)
def resnext101_32x8d(**kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
`"Aggregated Residual Transformation for Deep Neural Networks"
<https://arxiv.org/pdf/1611.05431.pdf>`_

Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
3 changes: 2 additions & 1 deletion official/vision/classification/resnet/test.py
@@ -10,7 +10,8 @@
import multiprocessing
import time

import model as resnet_model # pylint-disable=import-error
# pylint: disable=import-error
import model as resnet_model

import megengine
import megengine.data as data
4 changes: 3 additions & 1 deletion official/vision/classification/resnet/train.py
@@ -12,7 +12,8 @@
import os
import time

import model as resnet_model # pylint-disable=import-error
# pylint: disable=import-error
import model as resnet_model

import megengine
import megengine.autodiff as autodiff
@@ -117,6 +118,7 @@ def main():


def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
3 changes: 2 additions & 1 deletion official/vision/classification/shufflenet/inference.py
@@ -9,7 +9,8 @@
import argparse
import json

import model as snet_model # pylint-disable=import-error
# pylint: disable=import-error
import model as snet_model

import cv2
import numpy as np
15 changes: 11 additions & 4 deletions official/vision/classification/shufflenet/model.py
@@ -58,7 +58,10 @@ def __init__(self, inp, oup, mid_channels, *, ksize, stride):
M.BatchNorm2d(mid_channels),
M.ReLU(),
# dw
M.Conv2d(mid_channels, mid_channels, ksize, stride, pad, groups=mid_channels, bias=False,),
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
M.BatchNorm2d(mid_channels),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
@@ -135,13 +138,15 @@ def __init__(self, num_classes=1000, model_size="1.5x"):
if i == 0:
self.features.append(
ShuffleV2Block(
input_channel, output_channel, mid_channels=output_channel // 2, ksize=3, stride=2,
input_channel, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=2,
)
)
else:
self.features.append(
ShuffleV2Block(
input_channel // 2, output_channel, mid_channels=output_channel // 2, ksize=3, stride=1,
input_channel // 2, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=1,
)
)

@@ -157,7 +162,9 @@ def __init__(self, num_classes=1000, model_size="1.5x"):
self.globalpool = M.AvgPool2d(7)
if self.model_size == "2.0x":
self.dropout = M.Dropout(0.2)
self.classifier = M.Sequential(M.Linear(self.stage_out_channels[-1], num_classes, bias=False))
self.classifier = M.Sequential(
M.Linear(self.stage_out_channels[-1], num_classes, bias=False)
)
self._initialize_weights()

def forward(self, x):
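The ShuffleV2Block branch reflowed in this file follows the usual depthwise-separable pattern: a depthwise convolution (groups equal to the channel count) followed by a 1x1 pointwise-linear projection. A self-contained MegEngine sketch of that pattern with arbitrary channel sizes (not the ones ShuffleNet actually uses):

    import numpy as np
    import megengine as mge
    import megengine.module as M

    mid, out_ch, ksize, stride = 32, 64, 3, 1
    pad = ksize // 2

    branch = M.Sequential(
        # depthwise: one filter per input channel
        M.Conv2d(mid, mid, ksize, stride, pad, groups=mid, bias=False),
        M.BatchNorm2d(mid),
        # pointwise-linear: 1x1 projection to the output width, no activation
        M.Conv2d(mid, out_ch, 1, 1, 0, bias=False),
        M.BatchNorm2d(out_ch),
    )

    x = mge.Tensor(np.random.randn(1, mid, 56, 56).astype("float32"))
    print(branch(x).shape)      # (1, 64, 56, 56)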
3 changes: 2 additions & 1 deletion official/vision/classification/shufflenet/test.py
Expand Up @@ -10,7 +10,8 @@
import multiprocessing
import time

import model as snet_model # pylint-disable=import-error
# pylint: disable=import-error
import model as snet_model

import megengine
import megengine.data as data
6 changes: 4 additions & 2 deletions official/vision/classification/shufflenet/train.py
@@ -11,7 +11,8 @@
import os
import time

import model as snet_model # pylint-disable=import-error
# pylint: disable=import-error
import model as snet_model

import megengine
import megengine.autodiff as autodiff
@@ -116,6 +117,7 @@ def main():


def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
if rank == 0:
os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
@@ -162,7 +164,7 @@ def worker(rank, world_size, ngpus_per_node, args):
print("NOT include ", n, p.shape)
params_nwd.append(p)
opt = optim.SGD(
[{"params": params_wd}, {"params": params_nwd, "weight_decay": 0},],
[{"params": params_wd}, {"params": params_nwd, "weight_decay": 0}, ],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay * world_size, # scale weight decay in "SUM" mode
2 changes: 2 additions & 0 deletions official/vision/detection/__init__.py
@@ -0,0 +1,2 @@
#!/usr/bin/python3
# -*- coding:utf-8 -*-
12 changes: 6 additions & 6 deletions official/vision/detection/configs/__init__.py
@@ -1,15 +1,15 @@
from .atss_res50_coco_1x_800size import atss_res50_coco_1x_800size
from .atss_res101_coco_2x_800size import atss_res101_coco_2x_800size
from .atss_resx101_coco_2x_800size import atss_resx101_coco_2x_800size
from .faster_rcnn_res50_coco_1x_800size import faster_rcnn_res50_coco_1x_800size
from .faster_rcnn_res101_coco_2x_800size import faster_rcnn_res101_coco_2x_800size
from .faster_rcnn_resx101_coco_2x_800size import faster_rcnn_resx101_coco_2x_800size
from .retinanet_res50_coco_1x_800size import retinanet_res50_coco_1x_800size
from .retinanet_res101_coco_2x_800size import retinanet_res101_coco_2x_800size
from .retinanet_resx101_coco_2x_800size import retinanet_resx101_coco_2x_800size
from .fcos_res50_coco_1x_800size import fcos_res50_coco_1x_800size
from .fcos_res101_coco_2x_800size import fcos_res101_coco_2x_800size
from .fcos_resx101_coco_2x_800size import fcos_resx101_coco_2x_800size
from .atss_res50_coco_1x_800size import atss_res50_coco_1x_800size
from .atss_res101_coco_2x_800size import atss_res101_coco_2x_800size
from .atss_resx101_coco_2x_800size import atss_resx101_coco_2x_800size
from .retinanet_res50_coco_1x_800size import retinanet_res50_coco_1x_800size
from .retinanet_res101_coco_2x_800size import retinanet_res101_coco_2x_800size
from .retinanet_resx101_coco_2x_800size import retinanet_resx101_coco_2x_800size

_EXCLUDE = {}
__all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")]
6 changes: 4 additions & 2 deletions official/vision/detection/layers/basic/functional.py
@@ -52,9 +52,11 @@ def batched_nms(
boxes: Tensor, scores: Tensor, idxs: Tensor, iou_thresh: float, max_output: Optional[int] = None
) -> Tensor:
r"""
Performs non-maximum suppression (NMS) on the boxes according to their intersection-over-union (IoU).
Performs non-maximum suppression (NMS) on the boxes according to
their intersection-over-union (IoU).

:param boxes: tensor of shape `(N, 4)`; the boxes to perform nms on; each box is expected to be in `(x1, y1, x2, y2)` format.
:param boxes: tensor of shape `(N, 4)`; the boxes to perform nms on;
each box is expected to be in `(x1, y1, x2, y2)` format.
:param iou_thresh: ``IoU`` threshold for overlapping.
:param idxs: tensor of shape `(N,)`, the class indexs of boxes in the batch.
:param scores: tensor of shape `(N,)`, the score of boxes.
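A class-aware NMS like the one documented above is commonly implemented by offsetting every box by its class index, so boxes of different classes can never overlap, and then running plain NMS once. A self-contained NumPy sketch of that trick (this is the standard idea, not necessarily how the MegEngine code below the fold implements it):

    import numpy as np

    def nms(boxes, scores, iou_thresh):
        # plain greedy NMS; boxes are (N, 4) in (x1, y1, x2, y2) format
        order = scores.argsort()[::-1]
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
            yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
            xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
            yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
            inter = np.maximum(xx2 - xx1, 0) * np.maximum(yy2 - yy1, 0)
            iou = inter / (areas[i] + areas[order[1:]] - inter)
            order = order[1:][iou <= iou_thresh]
        return np.array(keep)

    def batched_nms_np(boxes, scores, idxs, iou_thresh):
        # shift each class into its own disjoint coordinate range
        offsets = idxs[:, None].astype("float32") * (boxes.max() + 1)
        return nms(boxes + offsets, scores, iou_thresh)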
4 changes: 2 additions & 2 deletions official/vision/detection/layers/det/sampling.py
@@ -29,7 +29,7 @@ def sample_labels(labels, num_samples, label_value, ignore_label=-1):

topk_tensor = F.zeros_like(labels).astype("float32")
topk_tensor[mask] = uniform(size=num_class)
_, select_inds = F.topk(topk_tensor, k=num_samples-num_class)
_, select_inds = F.topk(topk_tensor, k=num_samples - num_class)

labels[select_inds] = ignore_label
return labels
@@ -54,7 +54,7 @@ def sample_mask_from_labels(labels, num_sample, sample_value):
return sample_mask

random_tensor = sample_mask * uniform(size=labels.shape)
_, sampled_idx = F.topk(random_tensor, k=num_sample-num_mask)
_, sampled_idx = F.topk(random_tensor, k=num_sample - num_mask)
sample_mask[sampled_idx] = F.zeros(sampled_idx.shape)

return sample_mask
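Both hunks above rely on the same idiom: give every masked position an independent uniform random score, then take a top-k over those scores, which yields a uniformly random subset without a dedicated random-choice op. A small NumPy sketch of the idiom (the function name is illustrative):

    import numpy as np

    def random_subset_from_mask(mask, k, rng=np.random):
        # pick k random positions among those where mask is True
        scores = np.zeros(mask.shape, dtype="float32")
        scores[mask] = rng.uniform(size=int(mask.sum()))
        # the k largest random scores mark a uniformly random subset of the mask
        return np.argsort(-scores)[:k]

    mask = np.array([True, False, True, True, False, True])
    print(random_subset_from_mask(mask, k=2))   # two indices drawn from {0, 2, 3, 5}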
4 changes: 2 additions & 2 deletions official/vision/detection/models/atss.py
@@ -196,8 +196,8 @@ def get_ground_truth(self, anchors_list, batched_gt_boxes, batched_num_gts):
candidate_idxs = F.concat(candidate_idxs, axis=1)

candidate_ious = F.gather(ious, 1, candidate_idxs)
ious_thr = F.mean(candidate_ious, axis=1, keepdims=True) + \
F.std(candidate_ious, axis=1, keepdims=True)
ious_thr = (F.mean(candidate_ious, axis=1, keepdims=True)
+ F.std(candidate_ious, axis=1, keepdims=True))
is_foreground = F.scatter(
F.zeros(ious.shape), 1, candidate_idxs, F.ones(candidate_idxs.shape)
).astype(bool) & (ious >= ious_thr)
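The expression reformatted above is ATSS's adaptive threshold: for each ground-truth box, a candidate anchor counts as foreground only if its IoU is at least the mean plus one standard deviation of that box's candidate IoUs. A tiny NumPy illustration with made-up numbers:

    import numpy as np

    # candidate IoUs for one ground-truth box (one row per box in the real code)
    candidate_ious = np.array([0.12, 0.18, 0.55, 0.61, 0.07], dtype="float32")

    iou_thr = candidate_ious.mean() + candidate_ious.std()
    is_foreground = candidate_ious >= iou_thr
    print(iou_thr, is_foreground)   # only the clearly overlapping candidates survive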
3 changes: 2 additions & 1 deletion official/vision/detection/models/fcos.py
@@ -210,7 +210,8 @@ def get_ground_truth(self, anchors_list, batched_gt_boxes, batched_num_gts):
is_in_boxes = F.min(offsets, axis=2) > 0

gt_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
areas = F.broadcast_to(F.expand_dims(gt_area, axis=1), offsets.shape[:2]) # FIXME: repeat
# FIXME: use repeat instead of broadcast_to
areas = F.broadcast_to(F.expand_dims(gt_area, axis=1), offsets.shape[:2])
areas[~is_cared_in_the_level] = float("inf")
areas[~is_in_boxes] = float("inf")

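Setting excluded entries to inf, as above, prepares the usual FCOS tie-break in which a location claimed by several ground-truth boxes is assigned to the one with the smallest area; the argmin itself sits below the fold, so this NumPy sketch is only an assumption about the surrounding code (rows are locations, columns are ground-truth boxes):

    import numpy as np

    # areas[i, j]: area of gt box j as a candidate for location i; inf = not a candidate
    areas = np.array([
        [1200.0, np.inf,  900.0],
        [np.inf, np.inf, np.inf],    # location matched by no gt box -> background
        [ 400.0, 2500.0, np.inf],
    ], dtype="float32")

    matched_gt = areas.argmin(axis=1)            # smallest-area gt wins each location
    is_background = np.isinf(areas.min(axis=1))  # all-inf rows stay background
    print(matched_gt, is_background)             # [2 0 0] [False  True False]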
6 changes: 1 addition & 5 deletions official/vision/detection/tools/test.py
@@ -17,11 +17,7 @@
from megengine.data import DataLoader

from official.vision.detection.tools.data_mapper import data_mapper
from official.vision.detection.tools.utils import (
InferenceSampler,
DetEvaluator,
import_from_file
)
from official.vision.detection.tools.utils import DetEvaluator, InferenceSampler, import_from_file

logger = mge.get_logger(__name__)
logger.setLevel("INFO")
Original file line number Diff line number Diff line change
@@ -17,9 +17,9 @@
from megengine.data import DataLoader

from official.vision.detection.tools.utils import (
DetEvaluator,
InferenceSampler,
PseudoDetectionDataset,
DetEvaluator,
import_from_file
)

Original file line number Diff line number Diff line change
@@ -18,7 +18,6 @@
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
# from megengine.jit import trace
from megengine.optimizer import SGD

from official.vision.detection.tools.data_mapper import data_mapper
Original file line number Diff line number Diff line change
@@ -18,7 +18,6 @@

from megengine.data import Collator, RandomSampler, Sampler
from megengine.data.dataset import VisionDataset
# from megengine.jit import trace

from official.vision.detection.tools.data_mapper import data_mapper
from official.vision.detection.tools.nms import py_cpu_nms
@@ -116,7 +115,7 @@ def sample(self):

def batch(self):
step, length = self.batch_size, len(self.indices)
batch_index = [self.indices[i : i + step] for i in range(0, length, step)]
batch_index = [self.indices[i: i + step] for i in range(0, length, step)]
return iter(batch_index)

def __len__(self):
@@ -47,7 +47,8 @@ def __init__(self):
)
def deeplabv3plus_res101_cityscapes_768size(**kwargs):
r"""DeepLab v3+ model from
`"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" <https://arxiv.org/abs/1802.02611>`_
`"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1802.02611>`_
"""
return models.DeepLabV3Plus(**kwargs)

@@ -48,7 +48,8 @@ def __init__(self):
)
def deeplabv3plus_res101_voc_512size(**kwargs):
r"""DeepLab v3+ model from
`"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" <https://arxiv.org/abs/1802.02611>`_
`"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1802.02611>`_
"""
return models.DeepLabV3Plus(**kwargs)
