From af37c12bd658c38f46994f27f1594cee8d636218 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 27 Oct 2021 15:35:17 +0200
Subject: [PATCH 01/15] Update tqdm for fixed width

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 4886034d811f..736edd036fad 100644
--- a/train.py
+++ b/train.py
@@ -289,7 +289,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         pbar = enumerate(train_loader)
         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
-            pbar = tqdm(pbar, total=nb)  # progress bar
+            pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)

From ac72e0e679f545f8bd6976fef8fa51efbea3d295 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 27 Oct 2021 15:42:10 +0200
Subject: [PATCH 02/15] Update val.py

---
 val.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/val.py b/val.py
index 1fc98c71198b..ce9919d513af 100644
--- a/val.py
+++ b/val.py
@@ -160,7 +160,8 @@ def run(data,
     dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
-    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
+    pbar = tqdm(enumerate(dataloader), desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+    for batch_i, (img, targets, paths, shapes) in pbar:
         t1 = time_sync()
         img = img.to(device, non_blocking=True)
         img = img.half() if half else img.float()  # uint8 to fp16/32

From afd93a73bb2f2eb703ee01aca82d546f428debe5 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 27 Oct 2021 15:43:46 +0200
Subject: [PATCH 03/15] Update val.py

---
 val.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/val.py b/val.py
index ce9919d513af..4aab87e275d3 100644
--- a/val.py
+++ b/val.py
@@ -160,8 +160,8 @@ def run(data,
     dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
-    pbar = tqdm(enumerate(dataloader), desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-    for batch_i, (img, targets, paths, shapes) in pbar:
+    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+    for batch_i, (img, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         img = img.to(device, non_blocking=True)
         img = img.half() if half else img.float()  # uint8 to fp16/32

From feeaf77551d0e054ea8e3f40cb095e525781977d Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:03:38 +0100
Subject: [PATCH 04/15] Try ncols= in train.py

---
 train.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index 698d031ad3c6..19a48f6fabd2 100644
--- a/train.py
+++ b/train.py
@@ -5,11 +5,11 @@
 Usage:
     $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640
 """
-
 import argparse
 import math
 import os
 import random
+import shutil
 import sys
 import time
 from copy import deepcopy
@@ -289,7 +289,8 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         pbar = enumerate(train_loader)
         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
-            pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+            # pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+            pbar = tqdm(pbar, total=nb, ncols=shutil.get_terminal_size().columns)
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)

From f89cfefefa0c75a024dffcce997984568d2fe701 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:08:28 +0100
Subject: [PATCH 05/15] NCOLS

---
 train.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 19a48f6fabd2..bd412bd73a2a 100644
--- a/train.py
+++ b/train.py
@@ -54,6 +54,7 @@
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
 WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+NCOLS = shutil.get_terminal_size().columns


 def train(hyp,  # path/to/hyp.yaml or hyp dictionary
@@ -290,7 +291,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
             # pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-            pbar = tqdm(pbar, total=nb, ncols=shutil.get_terminal_size().columns)
+            pbar = tqdm(pbar, total=nb, ncols=NCOLS)
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)

From b4baf01fc24a1c2fe4fb3ba1ab616af860c87606 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:15:43 +0100
Subject: [PATCH 06/15] NCOLS

---
 train.py         | 4 +---
 utils/general.py | 2 ++
 val.py           | 5 +++--
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/train.py b/train.py
index bd412bd73a2a..7609bec6d8aa 100644
--- a/train.py
+++ b/train.py
@@ -9,7 +9,6 @@
 import math
 import os
 import random
-import shutil
 import sys
 import time
 from copy import deepcopy
@@ -40,7 +39,7 @@
 from utils.callbacks import Callbacks
 from utils.datasets import create_dataloader
 from utils.downloads import attempt_download
-from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
+from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
                            check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
                            intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle,
                            print_args, print_mutation, strip_optimizer)
@@ -54,7 +53,6 @@
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
 WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
-NCOLS = shutil.get_terminal_size().columns


 def train(hyp,  # path/to/hyp.yaml or hyp dictionary
diff --git a/utils/general.py b/utils/general.py
index 8f59d487edfb..93661dc2a11e 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -3,6 +3,7 @@
 General utils
 """

+import shutil
 import contextlib
 import glob
 import logging
@@ -40,6 +41,7 @@

 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLOv5 root directory
+NCOLS = shutil.get_terminal_size().columns  # terminal window size


 def set_logging(name=None, verbose=True):
diff --git a/val.py b/val.py
index 7f23b8704de5..f1c8d4b1b8c9 100644
--- a/val.py
+++ b/val.py
@@ -26,7 +26,7 @@
 from models.common import DetectMultiBackend
 from utils.callbacks import Callbacks
 from utils.datasets import create_dataloader
-from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
+from utils.general import (LOGGER, NCOLS, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
                            coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                            scale_coords, xywh2xyxy, xyxy2xywh)
 from utils.metrics import ConfusionMatrix, ap_per_class
@@ -162,7 +162,8 @@ def run(data,
     dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
-    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+    # pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+    pbar = tqdm(dataloader, desc=s, ncols=NCOLS)
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         if pt:

From bb73069e5345b7fc176c97fade7ce20263e4120d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 10 Nov 2021 14:16:43 +0000
Subject: [PATCH 07/15] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 train.py         | 8 ++++----
 utils/general.py | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/train.py b/train.py
index 7609bec6d8aa..f63bb6d210c0 100644
--- a/train.py
+++ b/train.py
@@ -39,10 +39,10 @@
 from utils.callbacks import Callbacks
 from utils.datasets import create_dataloader
 from utils.downloads import attempt_download
-from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
-                           check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
-                           intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle,
-                           print_args, print_mutation, strip_optimizer)
+from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size,
+                           check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path,
+                           init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods,
+                           one_cycle, print_args, print_mutation, strip_optimizer)
 from utils.loggers import Loggers
 from utils.loggers.wandb.wandb_utils import check_wandb_resume
 from utils.loss import ComputeLoss
diff --git a/utils/general.py b/utils/general.py
index 93661dc2a11e..3fc4bafe21a9 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -3,7 +3,6 @@
 General utils
 """

-import shutil
 import contextlib
 import glob
 import logging
@@ -12,6 +11,7 @@
 import platform
 import random
 import re
+import shutil
 import signal
 import time
 import urllib

From 22f7205bed5d6282c26fef159d662f8347db4772 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:18:01 +0100
Subject: [PATCH 08/15] bar_format

---
 train.py | 2 +-
 val.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index f63bb6d210c0..412c85236a4d 100644
--- a/train.py
+++ b/train.py
@@ -289,7 +289,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
             # pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-            pbar = tqdm(pbar, total=nb, ncols=NCOLS)
+            pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)
diff --git a/val.py b/val.py
index f1c8d4b1b8c9..430efaf6f103 100644
--- a/val.py
+++ b/val.py
@@ -163,7 +163,7 @@ def run(data,
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
     # pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-    pbar = tqdm(dataloader, desc=s, ncols=NCOLS)
+    pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         if pt:

From b7c796aa235ceb4a1e923a6cfa7a8cd01b9a5d4c Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:20:57 +0100
Subject: [PATCH 09/15] position 0 leave true

---
 train.py | 2 +-
 val.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index 412c85236a4d..a8bffc9a756a 100644
--- a/train.py
+++ b/train.py
@@ -289,7 +289,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
             # pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-            pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
+            pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0, leave=True)
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)
diff --git a/val.py b/val.py
index 430efaf6f103..9d6c2a5195c5 100644
--- a/val.py
+++ b/val.py
@@ -163,7 +163,7 @@ def run(data,
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
     # pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-    pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
+    pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0, leave=True)
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         if pt:

From ebe6abf1591658b3c099d7a91e740353b5e70851 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:24:54 +0100
Subject: [PATCH 10/15] exp0

---
 train.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/train.py b/train.py
index a8bffc9a756a..17545e1eaa80 100644
--- a/train.py
+++ b/train.py
@@ -339,6 +339,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
             if RANK in [-1, 0]:
                 mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                 mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
+                tqdm._instances.clear()
                 pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (
                     f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                 callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn)

From ea875c908f16d4601b21b3274e923e489962ee74 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:26:07 +0100
Subject: [PATCH 11/15] auto

---
 train.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/train.py b/train.py
index 17545e1eaa80..8726473dcedb 100644
--- a/train.py
+++ b/train.py
@@ -23,7 +23,7 @@
 from torch.cuda import amp
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.optim import SGD, Adam, lr_scheduler
-from tqdm import tqdm
+from tqdm.auto import tqdm

 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[0]  # YOLOv5 root directory
@@ -339,7 +339,6 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
             if RANK in [-1, 0]:
                 mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                 mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
-                tqdm._instances.clear()
                 pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (
                     f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                 callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn)

From 396fb4a7ca8102cab44a8b98e558d9fbe160fa63 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:35:40 +0100
Subject: [PATCH 12/15] auto

---
 val.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/val.py b/val.py
index 9d6c2a5195c5..c65f4e906b1b 100644
--- a/val.py
+++ b/val.py
@@ -163,7 +163,7 @@ def run(data,
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
     # pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-    pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0, leave=True)
+    pbar = tqdm(dataloader, desc=s, ncols=0, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0, leave=True)
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         if pt:

From 73d958344c065706fc3ca109a54899df8c4aeecb Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:37:59 +0100
Subject: [PATCH 13/15] Cleanup

---
 train.py | 5 ++---
 val.py   | 3 +--
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/train.py b/train.py
index 8726473dcedb..4193365d5a09 100644
--- a/train.py
+++ b/train.py
@@ -23,7 +23,7 @@
 from torch.cuda import amp
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.optim import SGD, Adam, lr_scheduler
-from tqdm.auto import tqdm
+from tqdm import tqdm

 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[0]  # YOLOv5 root directory
@@ -288,8 +288,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         pbar = enumerate(train_loader)
         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
-            # pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-            pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0, leave=True)
+            pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)
diff --git a/val.py b/val.py
index c65f4e906b1b..53ac3038fdc1 100644
--- a/val.py
+++ b/val.py
@@ -162,8 +162,7 @@ def run(data,
     dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
-    # pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
-    pbar = tqdm(dataloader, desc=s, ncols=0, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0, leave=True)
+    pbar = tqdm(dataloader, desc=s, ncols=80, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         if pt:

From 6c33de0b8db63667fdcee08fe2fbe900cef982bf Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:39:21 +0100
Subject: [PATCH 14/15] Cleanup

---
 val.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/val.py b/val.py
index 53ac3038fdc1..62a30ac09d39 100644
--- a/val.py
+++ b/val.py
@@ -162,7 +162,7 @@ def run(data,
     dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
-    pbar = tqdm(dataloader, desc=s, ncols=80, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+    pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         if pt:

From cd9dacf056dec6458176376eefb8aa74617c8f99 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 10 Nov 2021 15:41:45 +0100
Subject: [PATCH 15/15] Cleanup

---
 utils/general.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/utils/general.py b/utils/general.py
index 3fc4bafe21a9..fa56ed49aba8 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -41,7 +41,6 @@

 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLOv5 root directory
-NCOLS = shutil.get_terminal_size().columns  # terminal window size


 def set_logging(name=None, verbose=True):
@@ -836,3 +835,7 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False):
     if mkdir:
         path.mkdir(parents=True, exist_ok=True)  # make directory
     return path
+
+
+# Variables
+NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns  # terminal window size
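
Taken together, the series settles on one pattern: utils/general.py computes NCOLS once (0 inside Docker, otherwise the detected terminal width) and every progress bar passes ncols=NCOLS plus a bar_format that caps the drawn bar at 10 characters. The snippet below is a minimal standalone sketch of that pattern, not code from the patches themselves: the Path('/.dockerenv') check only approximates YOLOv5's is_docker() helper (which is not shown in this diff), and the dummy loop stands in for the real train/val dataloaders.

import shutil
import time
from pathlib import Path

from tqdm import tqdm

# ncols=0 makes tqdm print stats only (no bar) where terminal-width detection is
# unreliable, e.g. inside Docker; otherwise use the detected terminal width.
# Path('/.dockerenv') is a rough stand-in for YOLOv5's is_docker() helper.
NCOLS = 0 if Path('/.dockerenv').exists() else shutil.get_terminal_size().columns

# Same fixed-width settings as the final train.py/val.py calls: bar capped at 10 chars
pbar = tqdm(range(100), ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
for i in pbar:
    time.sleep(0.01)  # stand-in for one training/validation batch
    pbar.set_description(f'batch {i}')

Capping the bar at 10 characters keeps each progress line short enough for narrow consoles, notebooks, and CI logs, which appears to be the "fixed width" goal named in the patch subjects.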