Python refactor and simplification (#2174)
* Python code cleanup

* Auto-format by Ultralytics actions

---------

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
glenn-jocher and UltralyticsAssistant committed Jan 13, 2024
1 parent e567c79 commit 2960a2d
Showing 9 changed files with 140 additions and 144 deletions.
2 changes: 1 addition & 1 deletion export.py
@@ -546,7 +546,7 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
"--quantize_uint8" if int8 else "",
"--output_node_names=Identity,Identity_1,Identity_2,Identity_3",
str(f_pb),
str(f),
f,
]
subprocess.run([arg for arg in args if arg], check=True)

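The `str(f)` → `f` change leans on `subprocess` accepting `os.PathLike` arguments directly (Python 3.6+ on POSIX), so wrapping the `Path` in `str()` is redundant. A minimal sketch of the same pattern, with a hypothetical path that is not from the diff:

```python
import subprocess
from pathlib import Path

f = Path("model_web")  # hypothetical output directory
f.mkdir(exist_ok=True)
# Path objects can be passed in the argument list without wrapping in str()
subprocess.run(["ls", f], check=True)  # assumes a POSIX `ls` is available
```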
16 changes: 11 additions & 5 deletions models/common.py
@@ -847,11 +847,17 @@ def pandas(self):
     def tolist(self):
         # return a list of Detections objects, i.e. 'for result in results.tolist():'
         r = range(self.n)  # iterable
-        x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
-        # for d in x:
-        #    for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
-        #        setattr(d, k, getattr(d, k)[0])  # pop out of list
-        return x
+        return [
+            Detections(
+                [self.ims[i]],
+                [self.pred[i]],
+                [self.files[i]],
+                self.times,
+                self.names,
+                self.s,
+            )
+            for i in r
+        ]

     def print(self):
         LOGGER.info(self.__str__())
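The `tolist` change drops a temporary binding and dead commented-out code, returning the comprehension directly. A hypothetical sketch of the pattern, not the `Detections` API:

```python
def squares_verbose(n):
    x = [i * i for i in range(n)]  # temporary binding adds nothing
    return x


def squares(n):
    return [i * i for i in range(n)]  # return the comprehension directly


assert squares_verbose(4) == squares(4) == [0, 1, 4, 9]
```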
4 changes: 1 addition & 3 deletions utils/downloads.py
@@ -24,9 +24,7 @@ def is_url(url, check=True):
 def gsutil_getsize(url=""):
     # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
     output = subprocess.check_output(["gsutil", "du", url], shell=True, encoding="utf-8")
-    if output:
-        return int(output.split()[0])
-    return 0
+    return int(output.split()[0]) if output else 0


 def url_getsize(url="https://ultralytics.com/images/bus.jpg"):
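The `gsutil_getsize` change collapses an `if`/early-`return` pair into a single conditional expression. A small standalone sketch, under the same assumption that `gsutil du` output begins with an integer byte count:

```python
def parse_du_size(output):
    # e.g. "1024  gs://bucket/file" -> 1024; empty output -> 0
    return int(output.split()[0]) if output else 0


assert parse_du_size("1024  gs://bucket/file") == 1024
assert parse_du_size("") == 0
```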
12 changes: 6 additions & 6 deletions utils/loggers/clearml/README.md
@@ -34,15 +34,15 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t

 1. Install the `clearml` python package:

-```bash
-pip install clearml
-```
+   ```bash
+   pip install clearml
+   ```

 2. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions:

-```bash
-clearml-init
-```
+   ```bash
+   clearml-init
+   ```

 That's it! You're done 😎

48 changes: 25 additions & 23 deletions utils/loggers/clearml/clearml_utils.py
@@ -29,7 +29,7 @@ def construct_dataset(clearml_info_string):
"More than one yaml file was found in the dataset root, cannot determine which one contains "
"the dataset definition this way."
)
elif len(yaml_filenames) == 0:
elif not yaml_filenames:
raise ValueError(
"No yaml definition found in dataset root path, check that there is a correct yaml file "
"inside the dataset root path."
@@ -43,7 +43,7 @@ def construct_dataset(clearml_info_string):
{"train", "test", "val", "nc", "names"}
), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"

data_dict = dict()
data_dict = {}
data_dict["train"] = (
str((dataset_root_path / dataset_definition["train"]).resolve()) if dataset_definition["train"] else None
)
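Both edits here swap verbose spellings for the idiomatic ones: an empty list is falsy, so `not yaml_filenames` replaces `len(yaml_filenames) == 0`, and the `{}` literal replaces the `dict()` call (it also skips a global name lookup). A quick hypothetical check:

```python
yaml_filenames = []
assert (not yaml_filenames) == (len(yaml_filenames) == 0)  # equivalent for sequences

data_dict = {}  # literal; same result as dict(), one less name lookup
assert data_dict == dict()
```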
@@ -148,24 +148,26 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres
             class_names (dict): dict containing mapping of class int to class name
             image (Tensor): A torch tensor containing the actual image data
         """
-        if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
-            # Log every bbox_interval times and deduplicate for any intermittend extra eval runs
-            if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
-                im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
-                annotator = Annotator(im=im, pil=True)
-                for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
-                    color = colors(i)
-
-                    class_name = class_names[int(class_nr)]
-                    confidence_percentage = round(float(conf) * 100, 2)
-                    label = f"{class_name}: {confidence_percentage}%"
-
-                    if conf > conf_threshold:
-                        annotator.rectangle(box.cpu().numpy(), outline=color)
-                        annotator.box_label(box.cpu().numpy(), label=label, color=color)
-
-                annotated_image = annotator.result()
-                self.task.get_logger().report_image(
-                    title="Bounding Boxes", series=image_path.name, iteration=self.current_epoch, image=annotated_image
-                )
-                self.current_epoch_logged_images.add(image_path)
+        if (
+            len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch
+            and self.current_epoch >= 0
+            and (self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images)
+        ):
+            im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
+            annotator = Annotator(im=im, pil=True)
+            for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
+                color = colors(i)
+
+                class_name = class_names[int(class_nr)]
+                confidence_percentage = round(float(conf) * 100, 2)
+                label = f"{class_name}: {confidence_percentage}%"
+
+                if conf > conf_threshold:
+                    annotator.rectangle(box.cpu().numpy(), outline=color)
+                    annotator.box_label(box.cpu().numpy(), label=label, color=color)
+
+            annotated_image = annotator.result()
+            self.task.get_logger().report_image(
+                title="Bounding Boxes", series=image_path.name, iteration=self.current_epoch, image=annotated_image
+            )
+            self.current_epoch_logged_images.add(image_path)
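The nested guard in `log_image_with_boxes` is flattened into one parenthesized condition; `and` short-circuits left to right, so the evaluation order is unchanged. A hypothetical reduction of the same predicate:

```python
def should_log(logged_count, max_per_epoch, epoch, interval, already_logged):
    # single guard equivalent to the former outer if + inner if
    return (
        logged_count < max_per_epoch
        and epoch >= 0
        and (epoch % interval == 0 and not already_logged)
    )


assert should_log(3, 16, 10, 5, already_logged=False)
assert not should_log(3, 16, 7, 5, already_logged=False)  # 7 % 5 != 0
```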
120 changes: 56 additions & 64 deletions utils/loggers/comet/__init__.py
@@ -166,34 +166,33 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar

     def _get_experiment(self, mode, experiment_id=None):
         if mode == "offline":
+            return (
+                comet_ml.ExistingOfflineExperiment(
+                    previous_experiment=experiment_id,
+                    **self.default_experiment_kwargs,
+                )
+                if experiment_id is not None
+                else comet_ml.OfflineExperiment(
+                    **self.default_experiment_kwargs,
+                )
+            )
+        try:
             if experiment_id is not None:
-                return comet_ml.ExistingOfflineExperiment(
+                return comet_ml.ExistingExperiment(
                     previous_experiment=experiment_id,
                     **self.default_experiment_kwargs,
                 )

-            return comet_ml.OfflineExperiment(
-                **self.default_experiment_kwargs,
-            )
+            return comet_ml.Experiment(**self.default_experiment_kwargs)

-        else:
-            try:
-                if experiment_id is not None:
-                    return comet_ml.ExistingExperiment(
-                        previous_experiment=experiment_id,
-                        **self.default_experiment_kwargs,
-                    )
-
-                return comet_ml.Experiment(**self.default_experiment_kwargs)
-
-            except ValueError:
-                logger.warning(
-                    "COMET WARNING: "
-                    "Comet credentials have not been set. "
-                    "Comet will default to offline logging. "
-                    "Please set your credentials to enable online logging."
-                )
-                return self._get_experiment("offline", experiment_id)
+        except ValueError:
+            logger.warning(
+                "COMET WARNING: "
+                "Comet credentials have not been set. "
+                "Comet will default to offline logging. "
+                "Please set your credentials to enable online logging."
+            )
+            return self._get_experiment("offline", experiment_id)

         return
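Returning early from the `offline` branch lets the online path drop its `else:` wrapper and one indentation level, while the `except ValueError` fallback still recurses into offline mode. A hypothetical sketch of that control flow, not the comet_ml API:

```python
def get_client(mode, key=None):
    if mode == "offline":
        # conditional expression picks between two constructors
        return f"existing-offline:{key}" if key is not None else "offline"
    try:
        if key is None:
            raise ValueError("no credentials")
        return f"online:{key}"
    except ValueError:
        return get_client("offline", key)  # fall back to offline mode


assert get_client("online") == "offline"
assert get_client("online", "abc") == "online:abc"
```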

@@ -242,10 +241,7 @@ def check_dataset(self, data_file):
         path = data_config.get("path")
         if path and path.startswith(COMET_PREFIX):
             path = data_config["path"].replace(COMET_PREFIX, "")
-            data_dict = self.download_dataset_artifact(path)
-
-            return data_dict
-
+            return self.download_dataset_artifact(path)
         self.log_asset(self.opt.data, metadata={"type": "data-config-file"})

         return check_dataset(data_file)
@@ -269,24 +265,22 @@ def log_predictions(self, image, labelsn, path, shape, predn):
             self.log_image(native_scale_image, name=image_name)
             self.logged_image_names.append(image_name)

-        metadata = []
-        for cls, *xyxy in filtered_labels.tolist():
-            metadata.append(
-                {
-                    "label": f"{self.class_names[int(cls)]}-gt",
-                    "score": 100,
-                    "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
-                }
-            )
-        for *xyxy, conf, cls in filtered_detections.tolist():
-            metadata.append(
-                {
-                    "label": f"{self.class_names[int(cls)]}",
-                    "score": conf * 100,
-                    "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
-                }
-            )
-
+        metadata = [
+            {
+                "label": f"{self.class_names[int(cls)]}-gt",
+                "score": 100,
+                "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
+            }
+            for cls, *xyxy in filtered_labels.tolist()
+        ]
+        metadata.extend(
+            {
+                "label": f"{self.class_names[int(cls)]}",
+                "score": conf * 100,
+                "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]},
+            }
+            for *xyxy, conf, cls in filtered_detections.tolist()
+        )
         self.metadata_dict[image_name] = metadata
         self.logged_images_count += 1
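Two append loops become one list comprehension plus `list.extend` with a generator expression, which reads as a single build step. A hypothetical miniature with the same star-unpacking:

```python
labels = [(0, 10, 20, 30, 40)]  # (cls, x1, y1, x2, y2)
detections = [(10, 20, 30, 40, 0.9, 0)]  # (x1, y1, x2, y2, conf, cls)

metadata = [{"label": f"{int(cls)}-gt", "score": 100, "box": xyxy} for cls, *xyxy in labels]
metadata.extend({"label": str(int(cls)), "score": conf * 100, "box": xyxy} for *xyxy, conf, cls in detections)
assert [m["label"] for m in metadata] == ["0-gt", "0"]
```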

@@ -398,9 +392,8 @@ def on_pretrain_routine_end(self, paths):
         for path in paths:
             self.log_asset(str(path))

-        if self.upload_dataset:
-            if not self.resume:
-                self.upload_dataset_artifact()
+        if self.upload_dataset and not self.resume:
+            self.upload_dataset_artifact()

         return

@@ -477,23 +470,22 @@ def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs):
         return

     def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix):
-        if self.comet_log_per_class_metrics:
-            if self.num_classes > 1:
-                for i, c in enumerate(ap_class):
-                    class_name = self.class_names[c]
-                    self.experiment.log_metrics(
-                        {
-                            "mAP@.5": ap50[i],
-                            "mAP@.5:.95": ap[i],
-                            "precision": p[i],
-                            "recall": r[i],
-                            "f1": f1[i],
-                            "true_positives": tp[i],
-                            "false_positives": fp[i],
-                            "support": nt[c],
-                        },
-                        prefix=class_name,
-                    )
+        if self.comet_log_per_class_metrics and self.num_classes > 1:
+            for i, c in enumerate(ap_class):
+                class_name = self.class_names[c]
+                self.experiment.log_metrics(
+                    {
+                        "mAP@.5": ap50[i],
+                        "mAP@.5:.95": ap[i],
+                        "precision": p[i],
+                        "recall": r[i],
+                        "f1": f1[i],
+                        "true_positives": tp[i],
+                        "false_positives": fp[i],
+                        "support": nt[c],
+                    },
+                    prefix=class_name,
+                )

         if self.comet_log_confusion_matrix:
             epoch = self.experiment.curr_epoch
34 changes: 16 additions & 18 deletions utils/loggers/comet/comet_utils.py
@@ -4,7 +4,7 @@

 try:
     import comet_ml
-except (ModuleNotFoundError, ImportError):
+except ImportError:
     comet_ml = None

 import yaml
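Catching `(ModuleNotFoundError, ImportError)` was redundant: `ModuleNotFoundError` has been a subclass of `ImportError` since Python 3.6, so the single `except ImportError` covers both. Verifiable directly:

```python
assert issubclass(ModuleNotFoundError, ImportError)

try:
    import comet_ml  # optional dependency; may be absent
except ImportError:  # also catches ModuleNotFoundError
    comet_ml = None
```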
@@ -109,14 +109,13 @@ def check_comet_weights(opt):
     if comet_ml is None:
         return

-    if isinstance(opt.weights, str):
-        if opt.weights.startswith(COMET_PREFIX):
-            api = comet_ml.API()
-            resource = urlparse(opt.weights)
-            experiment_path = f"{resource.netloc}{resource.path}"
-            experiment = api.get(experiment_path)
-            download_model_checkpoint(opt, experiment)
-            return True
+    if isinstance(opt.weights, str) and opt.weights.startswith(COMET_PREFIX):
+        api = comet_ml.API()
+        resource = urlparse(opt.weights)
+        experiment_path = f"{resource.netloc}{resource.path}"
+        experiment = api.get(experiment_path)
+        download_model_checkpoint(opt, experiment)
+        return True

     return None

@@ -136,15 +135,14 @@ def check_comet_resume(opt):
     if comet_ml is None:
         return

-    if isinstance(opt.resume, str):
-        if opt.resume.startswith(COMET_PREFIX):
-            api = comet_ml.API()
-            resource = urlparse(opt.resume)
-            experiment_path = f"{resource.netloc}{resource.path}"
-            experiment = api.get(experiment_path)
-            set_opt_parameters(opt, experiment)
-            download_model_checkpoint(opt, experiment)
+    if isinstance(opt.resume, str) and opt.resume.startswith(COMET_PREFIX):
+        api = comet_ml.API()
+        resource = urlparse(opt.resume)
+        experiment_path = f"{resource.netloc}{resource.path}"
+        experiment = api.get(experiment_path)
+        set_opt_parameters(opt, experiment)
+        download_model_checkpoint(opt, experiment)

-            return True
+        return True

     return None
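Merging the `isinstance` and `startswith` checks with `and` is safe because `and` short-circuits: `startswith` is never called on a non-string, which matters since `opt.resume` can be a bool. A hypothetical standalone check (`COMET_PREFIX` value assumed here, mirroring the module constant):

```python
COMET_PREFIX = "comet://"  # assumed value for illustration


def is_comet_uri(value):
    # startswith only runs when value is a str, so bools never reach it
    return isinstance(value, str) and value.startswith(COMET_PREFIX)


assert is_comet_uri("comet://workspace/project/abc123")
assert not is_comet_uri(True)
assert not is_comet_uri("yolov5s.pt")
```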
