Image classification yolov5 model #301

Open · wants to merge 11 commits into base: master
2 changes: 1 addition & 1 deletion DataCollection/GenerateImages.py
@@ -186,7 +186,7 @@ def _decay_value_radically_norm(x, centers, max_value, min_value, dev):

l, a, b = cv2.split(lab)

-for num in range(0, 5):
+for num in range(0, 6):
value = float(decimal.Decimal(random.randrange(10, 1000))/100)
clahe = cv2.createCLAHE(clipLimit=value, tileGridSize=(8,8))
cl = clahe.apply(l)
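
(For context: this change bumps the loop bound from 5 to 6, so each source image now yields six CLAHE-equalized variants instead of five. Below is a minimal, self-contained sketch of the step being modified; the input/output paths are hypothetical, while the split/CLAHE/random-clip-limit lines mirror the diffed code above.)

import decimal
import random

import cv2

img = cv2.imread("input.jpg")                       # hypothetical input path
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)          # CLAHE is applied in LAB space
l, a, b = cv2.split(lab)

for num in range(0, 6):                             # six variants after this change
    # random clip limit in [0.10, 10.00), as in the diffed code
    value = float(decimal.Decimal(random.randrange(10, 1000)) / 100)
    clahe = cv2.createCLAHE(clipLimit=value, tileGridSize=(8, 8))
    cl = clahe.apply(l)                             # equalize the L channel only
    out = cv2.cvtColor(cv2.merge((cl, a, b)), cv2.COLOR_LAB2BGR)
    cv2.imwrite("clahe_%d.jpg" % num, out)          # hypothetical output path

Because only the L (lightness) channel is equalized, color is preserved while local contrast varies across the generated variants.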
203 changes: 138 additions & 65 deletions DataCollection/ImageAugmentation.ipynb


587 changes: 587 additions & 0 deletions ImageClassification/imageClassificationcnn.ipynb


249 changes: 249 additions & 0 deletions ImageClassification/imageclassification.ipynb


169 changes: 169 additions & 0 deletions ObjectDetectionYolov5/objectDetection_train.ipynb
@@ -0,0 +1,169 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "code",
"metadata": {
"id": "wbvMlHd_QwMG",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "fd0147bf-9e26-41de-eb98-fd0c58c361cd"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 \n",
"%cd yolov5\n",
"%pip install -qr requirements.txt \n",
"\n",
"import torch\n",
"import utils\n",
"display = utils.notebook_init() "
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"YOLOv5 🚀 v7.0-120-g3e55763 Python-3.9.16 torch-1.13.1+cu116 CPU\n"
]
},
{
"output_type": "stream",
"name": "stdout",
"text": [
"Setup complete ✅ (2 CPUs, 12.7 GB RAM, 25.5/107.7 GB disk)\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# traing the YOLOV5 model\n",
"\n",
"!python train.py --img 640 --batch 16 --epochs 3 --data /content/yolov5/data/data.yaml --weights yolov5s.pt "
],
"metadata": {
"id": "JIvXqot4f8d1",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "86e52852-2746-4466-ea66-e1f3e46c2f45"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=/content/yolov5/data/data.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=None, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
"YOLOv5 🚀 v7.0-120-g3e55763 Python-3.9.16 torch-1.13.1+cu116 CPU\n",
"\n",
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
"\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n",
"\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n",
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
"2023-03-15 11:20:33.924337: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2023-03-15 11:20:35.565625: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64\n",
"2023-03-15 11:20:35.565860: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64\n",
"2023-03-15 11:20:35.565890: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n",
"Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n",
"100% 14.1M/14.1M [00:00<00:00, 80.7MB/s]\n",
"\n",
"Overriding model.yaml nc=80 with nc=52\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
" 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
" 4 -1 2 115712 models.common.C3 [128, 128, 2] \n",
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
" 6 -1 3 625152 models.common.C3 [256, 256, 3] \n",
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
" 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n",
" 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n",
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
" 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
" 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
" 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
" 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 153729 models.yolo.Detect [52, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
"Model summary: 214 layers, 7159873 parameters, 7159873 gradients, 16.4 GFLOPs\n",
"\n",
"Transferred 343/349 items from yolov5s.pt\n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n",
"\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/drive/MyDrive/data_imageClassification/object_detection/train/labels.cache... 3619 images, 117 backgrounds, 0 corrupt: 100% 3736/3736 [00:00<?, ?it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning /content/drive/MyDrive/data_imageClassification/object_detection/valid/images/labels.cache... 4000 images, 33 backgrounds, 0 corrupt: 100% 4033/4033 [00:00<?, ?it/s]\n",
"\n",
"\u001b[34m\u001b[1mAutoAnchor: \u001b[0m5.99 anchors/target, 1.000 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
"Plotting labels to runs/train/exp/labels.jpg... \n",
"Image sizes 640 train, 640 val\n",
"Using 2 dataloader workers\n",
"Logging results to \u001b[1mruns/train/exp\u001b[0m\n",
"Starting training for 3 epochs...\n",
"\n",
" Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
" 0/2 0G 0.08917 0.04332 0.09752 43 640: 100% 234/234 [1:32:34<00:00, 23.74s/it]\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 127/127 [36:36<00:00, 17.30s/it]\n",
" all 4033 15159 0.00765 0.571 0.00917 0.00362\n",
"\n",
" Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
" 1/2 0G 0.04627 0.03401 0.0956 42 640: 100% 234/234 [1:32:10<00:00, 23.64s/it]\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 127/127 [33:53<00:00, 16.01s/it]\n",
" all 4033 15159 0.0113 0.895 0.0142 0.0071\n",
"\n",
" Epoch GPU_mem box_loss obj_loss cls_loss Instances Size\n",
" 2/2 0G 0.03428 0.03075 0.09459 33 640: 100% 234/234 [1:32:29<00:00, 23.72s/it]\n",
" Class Images Instances P R mAP50 mAP50-95: 100% 127/127 [35:07<00:00, 16.59s/it]\n",
" all 4033 15159 0.0116 0.919 0.0149 0.00975\n",
"\n",
"3 epochs completed in 6.383 hours.\n",
"Optimizer stripped from runs/train/exp/weights/last.pt, 14.7MB\n",
"Optimizer stripped from runs/train/exp/weights/best.pt, 14.7MB\n",
"\n",
"Validating runs/train/exp/weights/best.pt...\n",
"Fusing layers... \n",
"Model summary: 157 layers, 7150369 parameters, 0 gradients, 16.2 GFLOPs\n",
" Class Images Instances P R mAP50 mAP50-95: 61% 78/127 [22:11<14:06, 17.28s/it]"
]
}
]
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "-GdpCP0TItC5"
},
"execution_count": null,
"outputs": []
}
]
}
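
(For reference: the training log above shows the dataset config at /content/yolov5/data/data.yaml with 52 classes ("Overriding model.yaml nc=80 with nc=52") and checkpoints written to runs/train/exp/weights/. Below is a minimal sketch, assuming a hypothetical test image, of loading the resulting best.pt for inference through YOLOv5's standard torch.hub entry point:)

import torch

# load the custom-trained checkpoint produced by the notebook above;
# "custom" is YOLOv5's hub entry point for user-trained weights
model = torch.hub.load("ultralytics/yolov5", "custom",
                       path="runs/train/exp/weights/best.pt")

results = model("test.jpg")   # hypothetical test image path
results.print()               # per-image detection counts and timing
results.save()                # saves annotated copies, by default under runs/detect/

The same weights also work with the repo's own script: python detect.py --weights runs/train/exp/weights/best.pt --img 640 --source test.jpg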