class EpochBasedTrainLoop in mmengine/runner/loops.py: class CocoDataset in mmdet/datasets/coco.py: need at least one array to concatenate #9610

Closed
flyzxm5177 opened this issue Jan 10, 2023 · 11 comments

Prerequisite

Task

I'm using the official example scripts/configs for the officially supported tasks/models/datasets.

Branch

master branch https://github.com/open-mmlab/mmdetection

Environment

torch version: 1.11.0+cu113 cuda: True
mmdetection: 3.0.0rc5
mmcv: 2.0.0rc3
mmengine: 0.4.0

Reproduces the problem - code sample

start training

runner.train()

Reproduces the problem - command or script

start training

runner.train()

Reproduces the problem - error message

loading annotations into memory...
Done (t=0.01s)
creating index...
index created!

ValueError Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/mmengine/registry/build_functions.py in build_from_cfg(cfg, registry, default_args)
120 else:
--> 121 obj = obj_cls(**args) # type: ignore
122

/kaggle/working/mmdetection/mmdet/datasets/base_det_dataset.py in __init__(self, seg_map_suffix, proposal_file, file_client_args, *args, **kwargs)
32 self.file_client = FileClient(**file_client_args)
---> 33 super().__init__(*args, **kwargs)
34

/opt/conda/lib/python3.7/site-packages/mmengine/dataset/base_dataset.py in __init__(self, ann_file, metainfo, data_root, data_prefix, filter_cfg, indices, serialize_data, pipeline, test_mode, lazy_init, max_refetch)
246 if not lazy_init:
--> 247 self.full_init()
248

/kaggle/working/mmdetection/mmdet/datasets/base_det_dataset.py in full_init(self)
70 if self.serialize_data:
---> 71 self.data_bytes, self.data_address = self._serialize_data()
72

/opt/conda/lib/python3.7/site-packages/mmengine/dataset/base_dataset.py in _serialize_data(self)
763 # TODO Check if np.concatenate is necessary
--> 764 data_bytes = np.concatenate(data_list)
765 # Empty cache for preventing making multiple copies of

<__array_function__ internals> in concatenate(*args, **kwargs)

ValueError: need at least one array to concatenate

During handling of the above exception, another exception occurred:

ValueError Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/mmengine/registry/build_functions.py in build_from_cfg(cfg, registry, default_args)
120 else:
--> 121 obj = obj_cls(**args) # type: ignore
122

/opt/conda/lib/python3.7/site-packages/mmengine/runner/loops.py in __init__(self, runner, dataloader, max_epochs, val_begin, val_interval, dynamic_intervals)
42 dynamic_intervals: Optional[List[Tuple[int, int]]] = None) -> None:
---> 43 super().__init__(runner, dataloader)
44 self._max_epochs = int(max_epochs)

/opt/conda/lib/python3.7/site-packages/mmengine/runner/base_loop.py in __init__(self, runner, dataloader)
26 self.dataloader = runner.build_dataloader(
---> 27 dataloader, seed=runner.seed, diff_rank_seed=diff_rank_seed)
28 else:

/opt/conda/lib/python3.7/site-packages/mmengine/runner/runner.py in build_dataloader(dataloader, seed, diff_rank_seed)
1332 if isinstance(dataset_cfg, dict):
-> 1333 dataset = DATASETS.build(dataset_cfg)
1334 if hasattr(dataset, 'full_init'):

/opt/conda/lib/python3.7/site-packages/mmengine/registry/registry.py in build(self, cfg, *args, **kwargs)
520 """
--> 521 return self.build_func(cfg, *args, **kwargs, registry=self)
522

/opt/conda/lib/python3.7/site-packages/mmengine/registry/build_functions.py in build_from_cfg(cfg, registry, default_args)
135 raise type(e)(
--> 136 f'class {obj_cls.__name__} in ' # type: ignore
137 f'{cls_location}.py: {e}')

ValueError: class CocoDataset in mmdet/datasets/coco.py: need at least one array to concatenate

During handling of the above exception, another exception occurred:

ValueError Traceback (most recent call last)
/tmp/ipykernel_23/3729266276.py in <module>
1 # start training
----> 2 runner.train()

/opt/conda/lib/python3.7/site-packages/mmengine/runner/runner.py in train(self)
1647
1648 self._train_loop = self.build_train_loop(
-> 1649 self._train_loop) # type: ignore
1650
1651 # build_optimizer should be called before build_param_scheduler

/opt/conda/lib/python3.7/site-packages/mmengine/runner/runner.py in build_train_loop(self, loop)
1441 loop_cfg,
1442 default_args=dict(
-> 1443 runner=self, dataloader=self._train_dataloader))
1444 else:
1445 by_epoch = loop_cfg.pop('by_epoch')

/opt/conda/lib/python3.7/site-packages/mmengine/registry/registry.py in build(self, cfg, *args, **kwargs)
519 >>> model = MODELS.build(cfg)
520 """
--> 521 return self.build_func(cfg, *args, **kwargs, registry=self)
522
523 def _add_child(self, registry: 'Registry') -> None:

/opt/conda/lib/python3.7/site-packages/mmengine/registry/build_functions.py in build_from_cfg(cfg, registry, default_args)
134 obj_cls.module.split('.')) # type: ignore
135 raise type(e)(
--> 136 f'class {obj_cls.__name__} in ' # type: ignore
137 f'{cls_location}.py: {e}')
138

ValueError: class EpochBasedTrainLoop in mmengine/runner/loops.py: class CocoDataset in mmdet/datasets/coco.py: need at least one array to concatenate

Additional information

I ran this tutorial (https://github.com/open-mmlab/mmdetection/blob/3.x/demo/MMDet_InstanceSeg_Tutorial.ipynb) without changing any code, but I ran into this problem when I got to runner.train(). How should I fix it?
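
This error usually means the dataset ended up with zero samples, so np.concatenate receives an empty list when mmengine serializes the data list. A rough way to see what is happening outside of runner.train() (a sketch, assuming cfg is the Config object built earlier in the notebook):

import copy
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules

register_all_modules()
ds_cfg = copy.deepcopy(cfg.train_dataloader.dataset)
ds_cfg['lazy_init'] = True  # skip serialization so the build itself cannot fail
dataset = DATASETS.build(ds_cfg)
print('parsed samples:', len(dataset.load_data_list()))
# 0 here means the annotation file or classes do not match;
# > 0 means the samples are probably dropped by filtering afterwards.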

@cpwan

cpwan commented Jan 16, 2023

Seems to be the same issue as #9613.
I also have the same error after pulling the latest 3.x branch. It worked fine before, but it is giving me this error now.

@cpwan

cpwan commented Jan 16, 2023

Found out why.

The metainfo keys were changed to lowercase in #9469.

For a custom dataset, use lowercase keys in the metainfo dictionary instead.
For example, in the Jupyter notebook demo, change

cfg.metainfo = {
    'CLASSES': ('balloon', ),
    'PALETTE': [
        (220, 20, 60),
    ]
}

to

cfg.metainfo = {
    'classes': ('balloon', ),
    'palette': [
        (220, 20, 60),
    ]
}

should work.
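
A quick sanity check before calling runner.train() again (a rough sketch, assuming cfg is the tutorial's Config object) is to build just the dataset; it raised the ValueError before the fix and should now report a non-zero length:

from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules

register_all_modules()
cfg.train_dataloader.dataset.metainfo = dict(
    classes=('balloon', ),
    palette=[(220, 20, 60)])
dataset = DATASETS.build(cfg.train_dataloader.dataset)
print(len(dataset))  # should be > 0 with the lowercase keys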

@JialiGithub


This works! Thanks a lot!

@1314520gu

How did you solve it? I'm completely confused.

@JunKaiLiao

JunKaiLiao commented May 10, 2023

Hi, the following is the code stored in configs/detr/detr_r101_100e_coco.py.
I have modified the metainfo, but I still get the issue. Can somebody help me? Thanks!
-> ValueError: class EpochBasedTrainLoop in mmengine/runner/loops.py: class CocoDataset in mmdet/datasets/coco.py: need at least one array to concatenate

CODE:
_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
    type='DETR',
    num_queries=100,
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=1),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='ChannelMapper',
        in_channels=[2048],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=None,
        num_outs=1),
    encoder=dict(  # DetrTransformerEncoder
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerEncoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True)))),
    decoder=dict(  # DetrTransformerDecoder
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerDecoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            cross_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True))),
        return_intermediate=True),
    positional_encoding=dict(num_feats=128, normalize=True),
    bbox_head=dict(
        type='DETRHead',
        num_classes=80,
        embed_dims=256,
        loss_cls=dict(
            type='CrossEntropyLoss',
            bg_cls_weight=0.1,
            use_sigmoid=False,
            loss_weight=1.0,
            class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            match_costs=[
                dict(type='ClassificationCost', weight=1.),
                dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
                dict(type='IoUCost', iou_mode='giou', weight=2.0)
            ])),
    test_cfg=dict(max_per_img=100))

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomChoice',
        transforms=[[
            dict(
                type='RandomChoiceResize',
                scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                        (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                        (736, 1333), (768, 1333), (800, 1333)],
                keep_ratio=True)
        ], [
            dict(
                type='RandomChoiceResize',
                scales=[(400, 1333), (500, 1333), (600, 1333)],
                keep_ratio=True),
            dict(
                type='RandomCrop',
                crop_type='absolute_range',
                crop_size=(384, 600),
                allow_negative_crop=True),
            dict(
                type='RandomChoiceResize',
                scales=[(480, 1333), (512, 1333), (544, 1333),
                        (576, 1333), (608, 1333), (640, 1333),
                        (672, 1333), (704, 1333), (736, 1333),
                        (768, 1333), (800, 1333)],
                keep_ratio=True)
        ]]),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
    clip_grad=dict(max_norm=0.1, norm_type=2),
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))

max_epochs = 150
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[100],
        gamma=0.1)
]

auto_scale_lr = dict(base_batch_size=16)
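
For reference, the config above never sets metainfo on the dataset, so when training on a custom COCO-style annotation file the lowercase override from the earlier comments would be added roughly like this ('your_class' is just a placeholder for the real category names):

metainfo = dict(classes=('your_class', ), palette=[(220, 20, 60)])
train_dataloader = dict(
    dataset=dict(metainfo=metainfo, pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(metainfo=metainfo))
test_dataloader = val_dataloader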

@amanikiruga

@JunKaiLiao Did the solution above not work for you?

@JunKaiLiao

@amanikiruga I found out that the class name in the annotation file was wrong. After fixing that, it can train without any issue. Thank you!!
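
For anyone else hitting this, a rough way to compare the category names in a COCO-style annotation file with the classes in the config (the path is a placeholder):

import json

with open('data/coco/annotations/instances_train2017.json') as f:  # placeholder path
    ann = json.load(f)

print('categories:', [c['name'] for c in ann['categories']])
print('images:', len(ann['images']), 'annotations:', len(ann['annotations']))
# The names must match metainfo['classes'] (or the dataset's default classes)
# exactly, otherwise every sample is filtered out and np.concatenate fails.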

@jennzhuge

jennzhuge commented Jun 6, 2023

I'm getting the same error, but I'm not using uppercase class names. Can anyone help?

Traceback (most recent call last):
File "/mmengine/mmengine/registry/build_functions.py", line 121, in build_from_cfg
obj = obj_cls(**args) # type: ignore
File "/p/home/jusers/zhuge1/juwels/perception/src/perception/datasets/carla.py", line 149, in init
super().init(
File "/mmengine/mmengine/dataset/base_dataset.py", line 250, in init
self.full_init()
File "/mmengine/mmengine/dataset/base_dataset.py", line 310, in full_init
self.data_bytes, self.data_address = self._serialize_data()
File "/mmengine/mmengine/dataset/base_dataset.py", line 772, in _serialize_data
data_bytes = np.concatenate(data_list)
File "<array_function internals>", line 180, in concatenate
ValueError: need at least one array to concatenate

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "/mmengine/mmengine/registry/build_functions.py", line 121, in build_from_cfg
obj = obj_cls(**args) # type: ignore
File "/mmengine/mmengine/runner/loops.py", line 44, in init
super().init(runner, dataloader)
File "/mmengine/mmengine/runner/base_loop.py", line 26, in init
self.dataloader = runner.build_dataloader(
File "/mmengine/mmengine/runner/runner.py", line 1346, in build_dataloader
dataset = DATASETS.build(dataset_cfg)
File "/mmengine/mmengine/registry/registry.py", line 545, in build
return self.build_func(cfg, *args, **kwargs, registry=self)
File "/mmengine/mmengine/registry/build_functions.py", line 135, in build_from_cfg
raise type(e)(
ValueError: class CarlaDataset in perception/datasets/carla.py: need at least one array to concatenate

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "tools/train.py", line 132, in
main()
File "tools/train.py", line 128, in main
runner.train()
File "/mmengine/mmengine/runner/runner.py", line 1672, in train
self._train_loop = self.build_train_loop(
File "/mmengine/mmengine/runner/runner.py", line 1464, in build_train_loop
loop = LOOPS.build(
File "/mmengine/mmengine/registry/registry.py", line 545, in build
return self.build_func(cfg, *args, **kwargs, registry=self)
File "/mmengine/mmengine/registry/build_functions.py", line 135, in build_from_cfg
raise type(e)(
ValueError: class EpochBasedTrainLoop in mmengine/runner/loops.py: class CarlaDataset in perception/datasets/carla.py: need at least one array to concatenate

here's my custom dataset class:

@DATASETS.register_module()
class CarlaDataset(BaseDataset):
    r"""Carla Dataset.

    This class serves as the API for experiments on Carla data.

    Args:
        data_root (str): Path of dataset root.
        ann_file (str): Annotation file path. Defaults to ''.
        metainfo (dict, optional): Meta information for dataset, such as class
            information. Defaults to None.
        data_prefix (dict): Prefix for training data. Defaults to
            dict(img_path='').
        pipeline (list, optional): Processing pipeline. Defaults to [].
        box_type_3d (str): Type of 3D box of this dataset.
            Based on the `box_type_3d`, the dataset will encapsulate the box
            to its original format then converted them to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options includes:

            - 'LiDAR': Box in LiDAR coordinates.
            - 'Depth': Box in depth coordinates, usually for indoor dataset.
            - 'Camera': Box in camera coordinates.

        filter_empty_gt (bool): Whether to filter the data with empty GT.
            If it's set to be True, the example with empty annotations after
            data pipeline will be dropped and a random example will be chosen
            in `__getitem__`. Defaults to True.
        test_mode (bool, optional): ``test_mode=True`` means in test phase.
            Defaults to False.
        load_eval_anns (bool): Whether to load annotations in test_mode,
            the annotation will be save in `eval_ann_infos`, which can be
            used in Evaluator. Defaults to True.
        show_ins_var (bool): For debug purpose. Whether to show variation
            of the number of instances before and after through pipeline.
            Defaults to False.
        past_data_cfg (dict): A specification of previous frames to load.
            Available options include:

            - `n_frames`: Number of previous frames to load.
            - `step`: Offset between frames.
            - `strict`: Whether to only load samples containing a complete set
              of frames equal in length to `n_frames`. If True, it skips
              partial data at the beginning of a sequence.

        filter_cfg (dict, optional): Config for filter data. By default sorts
            frames by sequence number.
    """

    # METAINFO = {
    metainfo = {
        'classes':
        ('road', 'route', 'lane', 'vehicle', 'agent', 'red_traffic_light',
         'yellow_traffic_light', 'green_traffic_light', 'pedestrian',
         'stop_sign', 'yield_sign', 'animal_crossing_sign', 'lane_reduct_sign',
         'no_turn_sign', 'one_way_sign', 'speed_limit_30_sign',
         'speed_limit_40_sign', 'speed_limit_50_sign', 'speed_limit_60_sign',
         'speed_limit_70_sign', 'speed_limit_80_sign', 'speed_limit_90_sign',
         'speed_limit_100_sign', 'obstacle'),
        'seg_classes': ('road', 'lane'),
        'det_classes': ('vehicle', 'pedestrian')
    }
    _fully_initialized: bool = False

    def __init__(self,
                 data_root: str,
                 ann_file: str,
                 metainfo: Optional[dict] = None,
                 data_prefix: dict = dict(img_path=''),
                 pipeline: List[Union[dict, Callable]] = [],
                 box_type_3d: dict = 'LiDAR',
                 filter_empty_gt: bool = True,
                 test_mode: bool = False,
                 load_eval_anns: bool = True,
                 show_ins_var: bool = False,
                 past_data_cfg: Optional[dict] = dict(
                     n_frames=0, step=1, strict=False),
                 filter_cfg: Optional[dict] = dict(sort_snapshots=True),
                 **kwargs) -> None:
        self.filter_empty_gt = filter_empty_gt
        self.load_eval_anns = load_eval_anns

        past_data_range = None
        if past_data_cfg is not None:
            n_frames = past_data_cfg.get("n_frames", 0)
            step = past_data_cfg.get("step", 1)
            past_data_range = range(-step, -(step * n_frames + 1), -step)
        self.past_data_range = past_data_range
        self.past_data_cfg = past_data_cfg

        self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d)

        if metainfo is not None and 'seg_classes' in metainfo:
            seg_classes = metainfo['seg_classes']
        else:
            # seg_classes = self.METAINFO['seg_classes']
            seg_classes = self.metainfo['seg_classes']

        self.seg_label_mapping = {
            i: -1
            # for i in range(len(self.METAINFO['classes']))
            for i in range(len(self.metainfo['classes']))
        }
        self.seg_label_mapping[-1] = -1
        for label_idx, name in enumerate(seg_classes):
            # ori_label = self.METAINFO['classes'].index(name)
            ori_label = self.metainfo['classes'].index(name)
            self.seg_label_mapping[ori_label] = label_idx

        if metainfo is not None and 'det_classes' in metainfo:
            det_classes = metainfo['det_classes']
        else:
            # det_classes = self.METAINFO['det_classes']
            det_classes = self.metainfo['det_classes']

        self.det_label_mapping = {
            i: -1
            # for i in range(len(self.METAINFO['classes']))
            for i in range(len(self.metainfo['classes']))
        }
        self.det_label_mapping[-1] = -1
        for label_idx, name in enumerate(det_classes):
            # ori_label = self.METAINFO['classes'].index(name)
            ori_label = self.metainfo['classes'].index(name)
            self.det_label_mapping[ori_label] = label_idx

        self.num_ins_per_cat = {name: 0 for name in det_classes}

        super().__init__(
            ann_file=ann_file,
            metainfo=metainfo,
            data_root=data_root,
            data_prefix=data_prefix,
            pipeline=pipeline,
            test_mode=test_mode,
            filter_cfg=filter_cfg,
            **kwargs)

        # can be accessed by other components in runner
        self.metainfo['box_type_3d'] = box_type_3d
        self.metainfo['seg_label_mapping'] = self.seg_label_mapping
        self.metainfo['det_label_mapping'] = self.det_label_mapping

        # used for showing variation of the number of instances before and
        # after through the pipeline
        self.show_ins_var = show_ins_var
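
For a custom BaseDataset subclass like this, the error means self.data_list is empty when _serialize_data() runs: either load_data_list() parses nothing, or filtering drops every sample. A rough diagnostic sketch (the paths are placeholders):

dataset = CarlaDataset(
    data_root='data/carla',             # placeholder
    ann_file='annotations/train.json',  # placeholder
    pipeline=[],
    lazy_init=True)                     # skip full_init so construction cannot fail
print('raw samples:', len(dataset.load_data_list()))
# 0 points at the annotation file or parsing;
# > 0 points at filter_data()/filter_cfg/filter_empty_gt removing everything.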

@shuangkouyizu

At the beginning, I corrected it according to the solution given above and it still didn't work.
Finally, I changed it as follows and it worked:

Run "pip list" to check your mmdet installation location; my mmdet is in anaconda3/....../site-packages/mmdet. Change the file content in /site-packages/mmdet/coco.py:

metainfo = {
    'classes': ('xxx', ),
    'palette': [
        (220, 20, 60),
    ]
}

@rafaelgov95

At the beginning, I corrected it according to the plan given above and it still didn't work. Finally, it was changed as follows and it worked:

"pip list" to check the mmdet installation location; my mmdet is in anaconda3/....../site-packages/mmdet; change the file content in /site-packages/mmdet/coco.py. metainfo = { 'classes': ('xxx', ), 'palette': [ (220, 20, 60), ] }

isn't this wrong?

@camongman

camongman commented Jul 24, 2023

The same error happened to me.
I've tested it with my custom dataset, and the initial model is like this.

model_name = 'rtmdet_m_8xb32-300e_coco'
checkpoint = '/mmdetection/checkpoints/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth'

Please help me; any comments are welcome.
Thanks a lot in advance.

My environment is Docker Desktop on Windows 10, and I used the "Dockerfile" here and installed through it.

ARG PYTORCH="1.9.0"
ARG CUDA="11.1"
ARG CUDNN="8"

FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel

ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" \
    TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \
    CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" \
    FORCE_CUDA="1"

# Avoid Public GPG key error
# https://github.com/NVIDIA/nvidia-container-toolkit/issues/258
RUN rm /etc/apt/sources.list.d/cuda.list \
    && rm /etc/apt/sources.list.d/nvidia-ml.list \
    && apt-key del 7fa2af80 \
    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \
    && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub

# (Optional, use Mirror to speed up downloads)
# RUN sed -i 's/http:\/\/archive.ubuntu.com\/ubuntu\//http:\/\/mirrors.aliyun.com\/ubuntu\//g' /etc/apt/sources.list && \
#    pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple

# Install the required packages
RUN apt-get update \
    && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install MMEngine and MMCV
RUN pip install openmim && \
    mim install "mmengine>=0.7.1" "mmcv>=2.0.0rc4"

# Install MMDetection
RUN conda clean --all \
    && git clone https://github.com/open-mmlab/mmdetection.git /mmdetection \
    && cd /mmdetection \
    && pip install --no-cache-dir -e .

WORKDIR /mmdetection

The config is below.

# Inherit and overwrite part of the config based on this config
_base_ = './rtmdet_m_8xb32-300e_coco.py'

data_root = '/mmdetection/workspace2/dataset/excavator/' # dataset root

train_batch_size_per_gpu = 4
train_num_workers = 2

max_epochs = 10
stage2_num_epochs = 1
base_lr = 0.00008


metainfo = {
    'classes': ('excavator', ),
    'palette': [
        (220, 20, 60),
    ]
}

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        data_prefix=dict(img='train/'),
        ann_file='train.json'))

val_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        data_prefix=dict(img='val/'),
        ann_file='val.json'))

test_dataloader = val_dataloader

val_evaluator = dict(ann_file=data_root + 'val.json')

test_evaluator = val_evaluator

model = dict(bbox_head=dict(num_classes=1))

# learning rate
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,
        begin=0,
        end=10),
    dict(
        # use cosine lr from 10 to 20 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

train_pipeline_stage2 = [
    dict(type='LoadImageFromFile', backend_args=None),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize',
        scale=(640, 640),
        ratio_range=(0.1, 2.0),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='YOLOXHSVRandomAug'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
    dict(type='PackDetInputs')
]

# optimizer
optim_wrapper = dict(
    _delete_=True,
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))

default_hooks = dict(
    checkpoint=dict(
        interval=5,
        max_keep_ckpts=2,  # only keep latest 2 checkpoints
        save_best='auto'
    ),
    logger=dict(type='LoggerHook', interval=5))

custom_hooks = [
    dict(
        type='PipelineSwitchHook',
        switch_epoch=max_epochs - stage2_num_epochs,
        switch_pipeline=train_pipeline_stage2)
]

# load COCO pre-trained weight
load_from = '/mmdetection/checkpoints/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth'

train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')])

The errors are here:

Traceback (most recent call last):
  File "/mmdetection/tools/train.py", line 133, in <module>
    main()
  File "/mmdetection/tools/train.py", line 129, in main
    runner.train()
  File "/opt/conda/lib/python3.9/site-packages/mmengine/runner/runner.py", line 1701, in train
    self._train_loop = self.build_train_loop(
  File "/opt/conda/lib/python3.9/site-packages/mmengine/runner/runner.py", line 1493, in build_train_loop
    loop = LOOPS.build(
  File "/opt/conda/lib/python3.9/site-packages/mmengine/registry/registry.py", line 570, in build
    return self.build_func(cfg, *args, **kwargs, registry=self)
  File "/opt/conda/lib/python3.9/site-packages/mmengine/registry/build_functions.py", line 144, in build_from_cfg
    raise type(e)(
ValueError: class `EpochBasedTrainLoop` in mmengine/runner/loops.py: class `CocoDataset` in mmdet/datasets/coco.py: need at least one array to concatenate
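
Since the metainfo keys above are already lowercase, the usual remaining cause is that the category name in train.json does not exactly match 'excavator', or that the annotation file contains no images/annotations. A quick check (a sketch, using the paths from the config above):

import json

data_root = '/mmdetection/workspace2/dataset/excavator/'
with open(data_root + 'train.json') as f:
    ann = json.load(f)

print('categories:', [c['name'] for c in ann['categories']])
print('images:', len(ann['images']), 'annotations:', len(ann['annotations']))
# A different spelling or case of the category name, or zero annotations,
# produces the same "need at least one array to concatenate" error.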
