Merge pull request ayooshkathuria#48 from id9502/master
Fixed some bugs and added support for tiny YOLOv3
eriklindernoren committed Sep 29, 2018
2 parents 959e0ff + df1db78 commit 7caaafb
Showing 3 changed files with 223 additions and 3 deletions.
206 changes: 206 additions & 0 deletions config/yolov3-tiny.cfg
@@ -0,0 +1,206 @@
[net]
# Testing
batch=1
subdivisions=1
# Training
# batch=64
# subdivisions=2
width=416
height=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1

# 0
[convolutional]
batch_normalize=1
filters=16
size=3
stride=1
pad=1
activation=leaky

# 1
[maxpool]
size=2
stride=2

# 2
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

# 3
[maxpool]
size=2
stride=2

# 4
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

# 5
[maxpool]
size=2
stride=2

# 6
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

# 7
[maxpool]
size=2
stride=2

# 8
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

# 9
[maxpool]
size=2
stride=2

# 10
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

# 11
[maxpool]
size=2
stride=1

# 12
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

###########

# 13
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

# 14
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

# 15
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear



# 16
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1

# 17
[route]
layers = -4

# 18
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

# 19
[upsample]
stride=2

# 20
[route]
layers = -1, 8

# 21
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

# 22
[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

# 23
[yolo]
mask = 1,2,3
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=80
num=6
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
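
The new config above follows the standard Darknet layout: a leading [net] block with training hyperparameters, then one block per layer, and filters=255 in the two detection convolutions because each [yolo] head predicts 3 anchors × (80 classes + 4 box coordinates + 1 objectness score) = 255 output channels. As a rough illustration of how such a file becomes the module_defs list that create_modules() consumes, a minimal parser could look like the sketch below (the name parse_cfg is hypothetical and need not match the repository's own parsing code):

def parse_cfg(path):
    """Parse a Darknet-style .cfg into a list of dicts, one per [block]."""
    module_defs = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue  # skip blank lines and comments
            if line.startswith('['):
                module_defs.append({'type': line[1:-1].strip()})
            else:
                key, value = line.split('=', 1)
                module_defs[-1][key.strip()] = value.strip()
    return module_defs

# Hypothetical usage:
# defs = parse_cfg('config/yolov3-tiny.cfg')   # defs[0] is the [net] block, the rest are layers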
17 changes: 14 additions & 3 deletions models.py
@@ -20,7 +20,7 @@ def create_modules(module_defs):
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams['channels'])]
output_filters = []
module_list = nn.ModuleList()
for i, module_def in enumerate(module_defs):
modules = nn.Sequential()
@@ -30,7 +30,8 @@ def create_modules(module_defs):
filters = int(module_def['filters'])
kernel_size = int(module_def['size'])
pad = (kernel_size - 1) // 2 if int(module_def['pad']) else 0
modules.add_module('conv_%d' % i, nn.Conv2d(in_channels=output_filters[-1],
modules.add_module('conv_%d' % i, nn.Conv2d(in_channels=(output_filters[-1] if len(output_filters) > 0
else int(hyperparams['channels'])),
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def['stride']),
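
With this change the first convolution takes its in_channels from hyperparams['channels'] instead of a pre-seeded output_filters list; every later layer still takes it from the previous layer's filter count. A small standalone illustration of the assumed bookkeeping, using the first convolutional layers of the tiny config (maxpool layers, which leave the channel count unchanged, are omitted):

hyperparams = {'channels': '3'}
output_filters = []
for filters in (16, 32, 64):  # conv layers 0, 2 and 4 in yolov3-tiny.cfg
    in_channels = output_filters[-1] if len(output_filters) > 0 else int(hyperparams['channels'])
    print(in_channels, '->', filters)  # 3 -> 16, 16 -> 32, 32 -> 64
    output_filters.append(filters)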
@@ -41,6 +42,16 @@ def create_modules(module_defs):
if module_def['activation'] == 'leaky':
modules.add_module('leaky_%d' % i, nn.LeakyReLU(0.1))

elif module_def['type'] == 'maxpool':
kernel_size = int(module_def['size'])
stride = int(module_def['stride'])
if kernel_size == 2 and stride == 1:
padding = nn.ZeroPad2d((0, 1, 0, 1))
modules.add_module('_debug_padding_%d' % i, padding)
maxpool = nn.MaxPool2d(kernel_size=int(module_def['size']),
stride=int(module_def['stride']), padding=int((kernel_size-1)//2))
modules.add_module('maxpool_%d' % i, maxpool)

elif module_def['type'] == 'upsample':
upsample = nn.Upsample( scale_factor=int(module_def['stride']),
mode='nearest')
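
The new maxpool branch reproduces Darknet's handling of the kernel-2 / stride-1 pooling layer (layer 11 in the config above): without extra padding, a 2×2 pool at stride 1 would shrink the 13×13 map to 12×12, so ZeroPad2d((0, 1, 0, 1)) pads one pixel on the right and bottom to keep the spatial size, while padding=(kernel_size-1)//2 stays 0 for kernel size 2. A quick shape check of that behaviour (a standalone sketch, not code from the repository):

import torch
import torch.nn as nn

x = torch.randn(1, 512, 13, 13)                   # feature map entering layer 11 at 416x416 input
pool = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)),  # pad right and bottom by one pixel
                     nn.MaxPool2d(kernel_size=2, stride=1, padding=0))
print(pool(x).shape)                              # torch.Size([1, 512, 13, 13])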
@@ -192,7 +203,7 @@ def forward(self, x, targets=None):
self.losses = defaultdict(float)
layer_outputs = []
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if module_def['type'] in ['convolutional', 'upsample']:
if module_def['type'] in ['convolutional', 'upsample', 'maxpool']:
x = module(x)
elif module_def['type'] == 'route':
layer_i = [int(x) for x in module_def['layers'].split(',')]
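
In forward(), maxpool modules are now executed like convolutional and upsample ones, while [route] layers are still resolved by index: layers = -4 re-reads an earlier layer's output, and layers = -1, 8 concatenates the upsampled map with layer 8's output along the channel dimension. For a 416×416 input, that second route amounts to the following (shapes worked out from the config above; a standalone sketch, not repository code):

import torch

upsampled = torch.randn(1, 128, 26, 26)  # layer 19 output (route index -1)
skip = torch.randn(1, 256, 26, 26)       # layer 8 output
routed = torch.cat([upsampled, skip], 1)
print(routed.shape)                      # torch.Size([1, 384, 26, 26])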
3 changes: 3 additions & 0 deletions weights/download_weights_tiny.sh
@@ -0,0 +1,3 @@
#! /usr/bin/env bash

wget https://pjreddie.com/media/files/yolov3-tiny.weights
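
The helper script simply fetches the published tiny weights with wget. For environments without wget, an equivalent one-off download in Python might look like this (a convenience sketch only; the script above is the intended path):

import urllib.request

url = "https://pjreddie.com/media/files/yolov3-tiny.weights"
urllib.request.urlretrieve(url, "yolov3-tiny.weights")  # saves the file in the current directory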
