Commit 897a1e8: Fix PEP8 errors and warnings

vniclas committed Jun 30, 2021
1 parent ef520c5

Showing 12 changed files with 57 additions and 44 deletions.
8 changes: 3 additions & 5 deletions src/opendr/engine/target.py
@@ -761,18 +761,16 @@ def __str__(self):
         return f"Class {self.data} speech command"
 
 
-#ToDo: Inherit from Target class:
-# The problem is that the variables in Target are not masked private (leading underscore) preventing proper getter/setters
+# ToDo: Inherit from Target class and merge with version proposed in the semantic segmentation branch
 class Heatmap():
     """
     This target is used for multi-class segmentation problems or multi-class problems that require heatmap annotations.
     """
 
     def __init__(self,
                  data: np.ndarray,
-                 description: Optional[str] = None,
-                 class_names: Optional[Dict[int, str]] = None):
-        # super().__init__()
+                 description: Optional[str]=None,
+                 class_names: Optional[Dict[int, str]]=None):
         self._data = None
         self._description = None
         self._class_names = None
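As context for the hunk above, a minimal sketch of constructing a Heatmap target; the import path follows the file location in this diff, while the class-name mapping and description are illustrative assumptions, not values from the repository:

    import numpy as np
    from opendr.engine.target import Heatmap

    # A tiny 2x2 map of per-pixel class IDs; the class names are hypothetical.
    semantic_map = np.array([[0, 1], [1, 1]], dtype=np.uint8)
    target = Heatmap(data=semantic_map,
                     description='toy segmentation output',
                     class_names={0: 'road', 1: 'car'})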
@@ -236,7 +236,8 @@ def prepare_data(input_path: Union[str, bytes, os.PathLike], output_path: Union[
     :type input_path: str, bytes, PathLike
     :param output_path: path to the converted Cityscapes dataset
     :type output_path: str, bytes, PathLike
-    :param generate_train_evaluation: if set to True, the training set will prepared to be used for evaluation. Usually, this is not required.
+    :param generate_train_evaluation: if set to True, the training set will prepared to be used for evaluation.
+        Usually, this is not required.
     :type generate_train_evaluation: bool
     :param num_workers: number of workers to be used in parallel
     :type num_workers: int
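The docstring was split because the original line exceeded the maximum line length (pycodestyle E501). A hypothetical flake8 configuration enforcing such a limit could look like this; the concrete limit the project uses is an assumption:

    [flake8]
    # assumed limit; use whatever the project actually enforces
    max-line-length = 120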
7 changes: 4 additions & 3 deletions src/opendr/perception/panoptic_segmentation/datasets/kitti.py
@@ -237,7 +237,8 @@ def prepare_data(input_path: Union[str, bytes, os.PathLike], output_path: Union[
     :type input_path: str, bytes, PathLike
     :param output_path: path to the converted Cityscapes dataset
     :type output_path: str, bytes, PathLike
-    :param generate_train_evaluation: if set to True, the training set will prepared to be used for evaluation. Usually, this is not required.
+    :param generate_train_evaluation: if set to True, the training set will prepared to be used for evaluation.
+        Usually, this is not required.
     :type generate_train_evaluation: bool
     :param num_workers: number of workers to be used in parallel
     :type num_workers: int
@@ -257,8 +258,8 @@ def prepare_data(input_path: Union[str, bytes, os.PathLike], output_path: Union[
     if output_path.exists():
         raise ValueError('The specified output path already exists.')
     if not (input_path / 'training').exists() or not (input_path / 'validation').exists():
-        raise ValueError(
-            'Please download and extract the KITTI panoptic segmentation dataset first: http://panoptic.cs.uni-freiburg.de/')
+        raise ValueError('Please download and extract the KITTI panoptic segmentation dataset first: '
+                         'http://panoptic.cs.uni-freiburg.de/')
 
     # COCO-style category list
     coco_categories = []
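A hedged usage sketch of prepare_data as declared above; the import works because the function lives in the kitti module named in the file header, while the data paths are assumptions:

    from opendr.perception.panoptic_segmentation.datasets.kitti import prepare_data

    # Convert the raw KITTI panoptic segmentation download (which must contain
    # 'training' and 'validation') into the converted layout; the output
    # directory must not exist yet, or a ValueError is raised.
    prepare_data(input_path='data/kitti_raw',
                 output_path='data/kitti_converted',
                 generate_train_evaluation=False,
                 num_workers=4)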
@@ -3,4 +3,4 @@
 from .mobilenetv3 import *
 from .model_factory import create_model
 from .config import is_exportable, is_scriptable, set_exportable, set_scriptable
-from .activations import *
\ No newline at end of file
+from .activations import *
@@ -64,9 +64,9 @@ def _split_channels(num_chan, num_groups):
 def conv2d_same(x,
                 weight: torch.Tensor,
                 bias: Optional[torch.Tensor] = None,
-                stride: Tuple[int, int] = (1, 1),
-                padding: Tuple[int, int] = (0, 0),
-                dilation: Tuple[int, int] = (1, 1),
+                stride: Tuple[int, int]=(1, 1),
+                padding: Tuple[int, int]=(0, 0),
+                dilation: Tuple[int, int]=(1, 1),
                 groups: int = 1):
     ih, iw = x.size()[-2:]
     kh, kw = weight.size()[-2:]
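Assuming conv2d_same implements TensorFlow-style 'SAME' padding (as its name and signature suggest), a sketch of the expected shape behaviour; the tensor sizes are illustrative:

    import torch

    x = torch.randn(1, 3, 224, 224)    # NCHW input
    weight = torch.randn(16, 3, 3, 3)  # (out_ch, in_ch, kh, kw)
    y = conv2d_same(x, weight, stride=(2, 2))
    # 'SAME' padding keeps out = ceil(in / stride), independent of kernel size:
    assert y.shape == (1, 16, 112, 112)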
@@ -232,7 +232,7 @@ def __init__(self,
         super(InvertedResidual, self).__init__()
         norm_kwargs = norm_kwargs or {}
         conv_kwargs = conv_kwargs or {}
-        mid_chs: int = make_divisible(in_chs * exp_ratio)
+        mid_chs = make_divisible(in_chs * exp_ratio)  # int
         self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
         self.drop_connect_rate = drop_connect_rate
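make_divisible itself is not part of this diff; in EfficientNet-style codebases it is commonly a rounding helper along the following lines (a sketch, not necessarily this repository's exact implementation):

    def make_divisible(v, divisor=8, min_value=None):
        # Round v to the nearest multiple of divisor, but never reduce it
        # by more than 10% (keeps channel counts hardware-friendly).
        min_value = min_value or divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    assert make_divisible(24 * 6) == 144  # e.g. expansion ratio 6 on 24 channels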
@@ -53,8 +53,8 @@ def build_optimizer(model, optimizer_cfg):
     base_lr = optimizer_cfg['lr']
     base_wd = optimizer_cfg.get('weight_decay', None)
     # weight_decay must be explicitly specified if mult is specified
-    if ('bias_decay_mult' in paramwise_options or 'norm_decay_mult' in paramwise_options
-            or 'dwconv_decay_mult' in paramwise_options):
+    if ('bias_decay_mult' in paramwise_options or 'norm_decay_mult' in paramwise_options or
+            'dwconv_decay_mult' in paramwise_options):
         assert base_wd is not None
     # get param-wise options
     bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.)
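Moving the `or` to the end of the line swaps one pycodestyle warning for the other: W503 flags a line break before a binary operator, W504 a break after it, and a project enables at most one of the two. A minimal illustration:

    first_condition, second_condition = True, False

    # break BEFORE the operator -> W503 (the style removed by this commit)
    ok = (first_condition
          or second_condition)

    # break AFTER the operator -> W504 (the style adopted here)
    ok = (first_condition or
          second_condition)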
@@ -13,7 +13,7 @@
 
 
 @contextlib.asynccontextmanager
-async def completed(trace_name='', name='', sleep_interval=0.05, streams: List[torch.cuda.Stream] = None):
+async def completed(trace_name='', name='', sleep_interval=0.05, streams: List[torch.cuda.Stream]=None):
     """
     Async context manager that waits for work to complete on
     given CUDA streams.
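Going only by the signature and docstring above, a hypothetical call site might look like this; the surrounding model code is an assumption:

    import torch

    async def profiled_forward(model, x):
        stream = torch.cuda.Stream()
        # Record work on a side stream, then wait for it to complete.
        async with completed(trace_name='net', name='forward', streams=[stream]):
            with torch.cuda.stream(stream):
                y = model(x)
        return y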
@@ -1,15 +1,28 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # model settings
 model = dict(
     type='EfficientPS',
     pretrained=True,
     backbone=dict(
         type='tf_efficientnet_b5',
-        act_cfg = dict(type="Identity"),
+        act_cfg=dict(type="Identity"),
         norm_cfg=dict(type='InPlaceABN', activation='leaky_relu', activation_param=0.01, requires_grad=True),
         style='pytorch'),
     neck=dict(
         type='TWOWAYFPN',
-        in_channels=[40, 64, 176, 2048], #b0[24, 40, 112, 1280], #b4[32, 56, 160, 1792],
+        in_channels=[40, 64, 176, 2048],  # b0[24, 40, 112, 1280], #b4[32, 56, 160, 1792],
         out_channels=256,
         norm_cfg=dict(type='InPlaceABN', activation='leaky_relu', activation_param=0.01, requires_grad=True),
         act_cfg=None,
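Both fixes in this config are classic pycodestyle cases: E251 (no spaces around `=` when passing a keyword argument) and the inline-comment rules E261/E262 (two spaces before the `#`, and `# ` to open the comment). For example:

    cfg = dict(act_cfg = None)  # E251: unexpected spaces around '='
    cfg = dict(act_cfg=None)    # clean

    channels = [40, 64, 176]  #b0 variant  <- E262: comment must start with '# '
    channels = [40, 64, 176]  # b0 variant (clean)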
@@ -49,20 +49,20 @@ class EfficientPsLearner(Learner):
     """
 
     def __init__(self,
-                 lr: float = .07,
-                 iters: int = 160,
-                 batch_size: int = 1,
-                 optimizer: str = 'SGD',
-                 lr_schedule: Optional[Dict[str, Any]] = None,
-                 momentum: float = .9,
-                 weight_decay: float = .0001,
-                 optimizer_config: Optional[Dict[str, Any]] = None,
-                 checkpoint_after_iter: int = 1,
-                 temp_path: str = str(Path(__file__).parent / 'eval_tmp_dir'),
-                 device: str = "cuda:0",
-                 num_workers: int = 1,
-                 seed: Optional[float] = None,
-                 config_file: str = str(Path(__file__).parent / 'configs' / 'singlegpu_sample.py')
+                 lr: float=.07,
+                 iters: int=160,
+                 batch_size: int=1,
+                 optimizer: str='SGD',
+                 lr_schedule: Optional[Dict[str, Any]]=None,
+                 momentum: float=.9,
+                 weight_decay: float=.0001,
+                 optimizer_config: Optional[Dict[str, Any]]=None,
+                 checkpoint_after_iter: int=1,
+                 temp_path: str=str(Path(__file__).parent / 'eval_tmp_dir'),
+                 device: str="cuda:0",
+                 num_workers: int=1,
+                 seed: Optional[float]=None,
+                 config_file: str=str(Path(__file__).parent / 'configs' / 'singlegpu_sample.py')
                  ):
         """
         :param lr: learning rate [training]
@@ -125,10 +125,10 @@ def __init__(self,
 
     def fit(self,
             dataset: Any,
-            val_dataset: Optional[Union[CityscapesDataset, KittiDataset]] = None,
-            logging_path: str = str(Path(__file__).parent / 'logging'),
-            silent: bool = False,
-            verbose: Optional[bool] = None
+            val_dataset: Optional[Union[CityscapesDataset, KittiDataset]]=None,
+            logging_path: str=str(Path(__file__).parent / 'logging'),
+            silent: bool=False,
+            verbose: Optional[bool]=None
             ):
         """
         This method is used for training the algorithm on a train dataset and validating on a separate dataset.
@@ -201,7 +201,7 @@ def fit(self,
 
     def eval(self,
              dataset: Any,
-             print_results: bool = False
+             print_results: bool=False
              ) -> Dict[str, Any]:
         """
         This method is used to evaluate the algorithm on a dataset and returns the following stats:
@@ -246,7 +246,7 @@ def eval(self,
 
     def infer(self,
               batch: Union[Image, List[Image], ImageWithFilename, List[ImageWithFilename]],
-              return_raw_logits: bool = False
+              return_raw_logits: bool=False
              ) -> Union[List[Tuple[Heatmap, Heatmap]], Tuple[Heatmap, Heatmap], np.ndarray]:
         """
         This method performs inference on the batch provided.
@@ -359,7 +359,7 @@ def reset(self) -> None:
         raise NotImplementedError
 
     @staticmethod
-    def download(path: str, mode: str = 'model', trained_on: str = 'cityscapes') -> str:
+    def download(path: str, mode: str='model', trained_on: str='cityscapes') -> str:
         """
         Download data from the OpenDR server. Valid modes include pre-trained model weights and data used in the unit tests.
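Taken together, the signatures above imply an end-to-end flow roughly like the following sketch; the import path, dataset constructor, and data locations are assumptions, while the method signatures come from this diff:

    from opendr.perception.panoptic_segmentation import EfficientPsLearner, CityscapesDataset

    train_set = CityscapesDataset(path='data/cityscapes/train')  # hypothetical paths
    val_set = CityscapesDataset(path='data/cityscapes/val')

    learner = EfficientPsLearner(lr=.07, iters=160, batch_size=1, device='cuda:0')
    learner.fit(train_set, val_dataset=val_set)
    results = learner.eval(val_set, print_results=True)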
@@ -75,7 +75,7 @@ def inference():
         config_file=str(Path(__file__).parent / 'configs' / 'singlegpu_sample.py')
     )
     learner.load(path=f'{DATA_ROOT}/checkpoints/efficientPS/cityscapes/model.pth')
-    predictions: List[Tuple[Heatmap, Heatmap]] = learner.infer(images)
+    predictions = learner.infer(images)
 
 
 if __name__ == "__main__":
@@ -86,12 +86,12 @@ def test_infer_single_image(self):
         image = Image(cv2.imread(image_filename))
         learner = EfficientPsLearner()
         learner.load(self.model_weights)
-        prediction: Tuple[Heatmap, Heatmap] = learner.infer(image)
+        prediction = learner.infer(image)
         for heatmap in prediction:
             self.assertIsInstance(heatmap, Heatmap)
 
         image_with_filename = ImageWithFilename(cv2.imread(image_filename), filename='lindau_000001_000019.png')
-        prediction: Tuple[Heatmap, Heatmap] = learner.infer(image_with_filename)
+        prediction = learner.infer(image_with_filename)
         for heatmap in prediction:
             self.assertIsInstance(heatmap, Heatmap)
 
@@ -103,7 +103,7 @@ def test_infer_batch_images(self):
         images = [Image(cv2.imread(f)) for f in image_filenames]
         learner = EfficientPsLearner()
         learner.load(self.model_weights)
-        predictions: List[Tuple[Heatmap, Heatmap]] = learner.infer(images)
+        predictions = learner.infer(images)
         for prediction in predictions:
             for heatmap in prediction:
                 self.assertIsInstance(heatmap, Heatmap)
