Skip to content
Permalink
Browse files

merge search to pymoo

  • Loading branch information...
mikelzc1990 committed May 4, 2019
1 parent 07597c8 commit 408ac80a5ac839ac9ca3c0fd8df02d90a50d4012
Showing with 1,200 additions and 6,161 deletions.
  1. BIN img/pf_micro.gif
  2. +93 −77 orginal_implementation/evolution/calculate_flops.py → misc/flops_counter.py
  3. 0 {validation → misc}/utils.py
  4. +1 −1 models/macro_decoder.py
  5. +4 −0 models/macro_genotypes.py
  6. +1 −1 models/micro_models.py
  7. +0 −77 orginal_implementation/bad_grad_viz.py
  8. +0 −32 orginal_implementation/checkpoints.py
  9. +0 −323 orginal_implementation/dataloader.py
  10. +0 −6 orginal_implementation/datasets/__init__.py
  11. +0 −133 orginal_implementation/datasets/filelist.py
  12. +0 −196 orginal_implementation/datasets/folderlist.py
  13. +0 −14 orginal_implementation/datasets/loaders.py
  14. +0 −342 orginal_implementation/datasets/transforms.py
  15. +0 −3 orginal_implementation/evaluate/__init__.py
  16. +0 −21 orginal_implementation/evaluate/classification.py
  17. +0 −6 orginal_implementation/evolution/__init__.py
  18. +0 −170 orginal_implementation/evolution/check_duplicates.py
  19. +0 −124 orginal_implementation/evolution/connectivity_matrix.py
  20. +0 −1,123 orginal_implementation/evolution/decoder.py
  21. +0 −302 orginal_implementation/evolution/residual_decoder.py
  22. +0 −236 orginal_implementation/evolution/variable_decoder.py
  23. +0 −4 orginal_implementation/losses/__init__.py
  24. +0 −14 orginal_implementation/losses/classification.py
  25. +0 −15 orginal_implementation/losses/regression.py
  26. +0 −25 orginal_implementation/losses/uncertainty.py
  27. +0 −88 orginal_implementation/main.py
  28. +0 −104 orginal_implementation/main_extra.py
  29. +0 −68 orginal_implementation/model.py
  30. +0 −5 orginal_implementation/models/__init__.py
  31. +0 −122 orginal_implementation/models/evonetwork.py
  32. +0 −81 orginal_implementation/models/model_utils.py
  33. +0 −104 orginal_implementation/nsga2_config.py
  34. +0 −852 orginal_implementation/nsga2_engine.py
  35. +0 −210 orginal_implementation/nsga2_individual.py
  36. +0 −119 orginal_implementation/nsga2_main.py
  37. +0 −136 orginal_implementation/parallel_main.py
  38. +0 −7 orginal_implementation/plugins/__init__.py
  39. +0 −81 orginal_implementation/plugins/backprop_visualizer.py
  40. +0 −218 orginal_implementation/plugins/genome_visualizer.py
  41. +0 −49 orginal_implementation/plugins/hypervolume.py
  42. +0 −29 orginal_implementation/plugins/image.py
  43. +0 −32 orginal_implementation/plugins/logger.py
  44. +0 −37 orginal_implementation/plugins/monitor.py
  45. +0 −105 orginal_implementation/plugins/visualizer.py
  46. +0 −120 orginal_implementation/test.py
  47. +0 −183 orginal_implementation/train.py
  48. +0 −52 orginal_implementation/utils.py
  49. +65 −23 orginal_implementation/datasets/cifar.py → search/cifar10_search.py
  50. +165 −0 search/evolution_search.py
  51. +59 −0 search/macro_encoding.py
  52. +167 −0 search/micro_encoding.py
  53. +218 −0 search/nsganet.py
  54. +307 −0 search/train_search.py
  55. +3 −4 validation/test.py
  56. +3 −4 validation/train.py
  57. +0 −81 visualization/backprop_visualizer.py
  58. +12 −2 visualization/{genome_visualizer.py → macro_visualize.py}
  59. +102 −0 visualization/micro_visualize.py
BIN +294 KB img/pf_micro.gif
Binary file not shown.
@@ -1,61 +1,32 @@
import torch.nn as nn
import torch
import numpy as np

# Original implementation:
# https://github.com/warmspringwinds/pytorch-segmentation-detection/blob/master/pytorch_segmentation_detection/utils/flops_benchmark.py

# ---- Public functions
def flops_to_string(flops):
    """Render a raw multiply-add count as a human-readable string.

    Picks the largest of the G/M/K scales whose integer quotient is
    non-zero, rounding the scaled value to two decimals; counts below
    1000 are printed verbatim with a plain 'Mac' suffix.
    """
    for exponent, suffix in ((9, 'GMac'), (6, 'MMac'), (3, 'KMac')):
        if flops // 10 ** exponent > 0:
            return str(round(flops / 10. ** exponent, 2)) + suffix
    return str(flops) + 'Mac'

def add_flops_counting_methods(net_main_module):
"""Adds flops counting functions to an existing model. After that
the flops count should be activated and the model should be run on an input
image.
Example:
fcn = add_flops_counting_methods(fcn)
fcn = fcn.cuda().train()
fcn.start_flops_count()
_ = fcn(batch)
fcn.compute_average_flops_cost() / 1e9 / 2 # Result in GFLOPs per image in batch
Important: dividing by 2 only works for resnet models -- see below for the details
of flops computation.
Attention: we are counting multiply-add as two flops in this work, because in
most resnet models convolutions are bias-free (BN layers act as bias there)
and it makes sense to count multiply and add as separate flops therefore.
This is why in the above example we divide by 2 in order to be consistent with
most modern benchmarks. For example in "Spatially Adaptive Computation Time for Residual
Networks" by Figurnov et al multiply-add was counted as two flops.

This module computes the average flops which is necessary for dynamic networks which
have different number of executed layers. For static networks it is enough to run the network
once and get statistics (above example).
def get_model_parameters_number(model, as_string=True):
    """Count the trainable parameters of *model*.

    Parameters
    ----------
    model : torch.nn.Module
        Network whose parameters are counted (only those with
        requires_grad=True).
    as_string : bool
        When True (default) return a human-readable string such as
        '1.2M' or '10.1k'; otherwise return the raw integer count.
    """
    params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
    if not as_string:
        return params_num

    if params_num // 10 ** 6 > 0:
        return str(round(params_num / 10 ** 6, 2)) + 'M'
    # Was a bare truthiness test; made the comparison explicit for
    # consistency with the branch above (behavior unchanged).
    elif params_num // 10 ** 3 > 0:
        return str(round(params_num / 10 ** 3, 2)) + 'k'

    return str(params_num)

The parameters are updated with the help of registered hook-functions which
are being called each time the respective layer is executed.
Parameters
----------
net_main_module : torch.nn.Module
Main module containing network
Returns
-------
net_main_module : torch.nn.Module
Updated main module with new methods/attributes that are used
to compute flops.
"""

def add_flops_counting_methods(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
@@ -65,7 +36,7 @@ def add_flops_counting_methods(net_main_module):

net_main_module.reset_flops_count()

# Adding varialbles necessary for masked flops computation
# Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)

return net_main_module
@@ -81,12 +52,9 @@ def compute_average_flops_cost(self):
"""

batches_count = self.__batch_counter__

flops_sum = 0

for module in self.modules():

if isinstance(module, torch.nn.Conv2d):
if is_supported_instance(module):
flops_sum += module.__flops__

return flops_sum / batches_count
@@ -101,9 +69,7 @@ def start_flops_count(self):
Call it before you run the network.
"""

add_batch_counter_hook_function(self)

self.apply(add_flops_counter_hook_function)


@@ -116,9 +82,7 @@ def stop_flops_count(self):
Call whenever you want to pause the computation.
"""

remove_batch_counter_hook_function(self)

self.apply(remove_flops_counter_hook_function)


@@ -130,17 +94,14 @@ def reset_flops_count(self):
Resets statistics computed so far.
"""

add_batch_counter_variables_or_reset(self)

self.apply(add_flops_counter_variable_or_reset)


def add_flops_mask(module, mask):
    """Attach *mask* as the flops mask of every Conv2d inside *module*.

    Non-convolutional submodules are left untouched.
    """
    def set_conv_mask(submodule):
        # Module.apply visits every submodule, including module itself.
        if isinstance(submodule, torch.nn.Conv2d):
            submodule.__mask__ = mask

    module.apply(set_conv_mask)


@@ -149,8 +110,53 @@ def remove_flops_mask(module):


# ---- Internal functions
def is_supported_instance(module):
    """Return True iff *module* is a layer type the flops counter handles."""
    supported_layers = (
        torch.nn.Conv2d,
        torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
        torch.nn.LeakyReLU, torch.nn.ReLU6,
        torch.nn.Linear,
        torch.nn.MaxPool2d, torch.nn.AvgPool2d,
        torch.nn.BatchNorm2d,
        torch.nn.Upsample,
        nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d,
    )
    return isinstance(module, supported_layers)


def empty_flops_counter_hook(module, input, output):
    """Fallback hook for layers whose cost is counted as zero.

    The addition of 0 is deliberate: it fails loudly if the module was
    never given a __flops__ accumulator.
    """
    module.__flops__ = module.__flops__ + 0


def upsample_flops_counter_hook(module, input, output):
    """Forward hook: count one flop per interpolated output element.

    Fix: the previous code took ``output[0]`` — the first *batch item* of
    the output tensor — before multiplying its dimensions, so the batch
    dimension was dropped and the count was too small by a factor of the
    batch size. The cost of upsampling is one op per element of the full
    output tensor, which is exactly ``output.numel()``.
    """
    module.__flops__ += int(output.numel())


def relu_flops_counter_hook(module, input, output):
    """Forward hook: activations cost one flop per output element."""
    module.__flops__ += output.numel()


def linear_flops_counter_hook(module, input, output):
    """Forward hook: count multiply-adds for a Linear layer.

    Generalized to inputs with extra leading dimensions
    (batch, ..., in_features): every position in the leading dims performs
    in_features * out_features macs, i.e. prod(input.shape) * out_features
    in total. For the common 2-D (batch, in_features) case this equals
    batch * in_features * out_features, matching the previous
    implementation exactly.
    """
    input = input[0]
    output_last_dim = output.shape[-1]
    module.__flops__ += int(np.prod(input.shape)) * output_last_dim


def pool_flops_counter_hook(module, input, output):
    """Forward hook: pooling touches every input element once."""
    # Hooks may receive a tuple of inputs; only the first is relevant.
    first_input = input[0]
    module.__flops__ += np.prod(first_input.shape)

def bn_flops_counter_hook(module, input, output):
    """Forward hook: count flops of a BatchNorm layer.

    Normalisation is one op per input element; when the layer has a
    learnable affine transform (scale + shift) that doubles the cost.

    Fix: removed the stray no-op expression statement ``module.affine``
    that preceded the body — it read the attribute and discarded the
    result, doing nothing.
    """
    input = input[0]

    batch_flops = np.prod(input.shape)
    if module.affine:
        batch_flops *= 2
    module.__flops__ += batch_flops

def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
@@ -161,9 +167,10 @@ def conv_flops_counter_hook(conv_module, input, output):
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups

# We count multiply-add as 2 flops
conv_per_position_flops = 2 * kernel_height * kernel_width * in_channels * out_channels
filters_per_channel = out_channels // groups
conv_per_position_flops = kernel_height * kernel_width * in_channels * filters_per_channel

active_elements_count = batch_size * output_height * output_width

@@ -177,6 +184,7 @@ def conv_flops_counter_hook(conv_module, input, output):
bias_flops = 0

if conv_module.bias is not None:

bias_flops = out_channels * active_elements_count

overall_flops = overall_conv_flops + bias_flops
@@ -187,13 +195,12 @@ def conv_flops_counter_hook(conv_module, input, output):
def batch_counter_hook(module, input, output):
    """Forward hook: accumulate the batch size of every forward pass."""
    # Hooks may receive multiple inputs; the batch size comes from the first.
    first_input = input[0]
    module.__batch_counter__ += first_input.shape[0]


def add_batch_counter_variables_or_reset(module):
    """Create — or zero out — the running batch counter on *module*."""
    module.__batch_counter__ = 0


@@ -208,38 +215,47 @@ def add_batch_counter_hook_function(module):
def remove_batch_counter_hook_function(module):
    """Detach and forget the batch-counter hook, if one was installed.

    A module that was never instrumented is left untouched (no-op).
    """
    if not hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__.remove()
    del module.__batch_counter_handle__


def add_flops_counter_variable_or_reset(module):
    """Create — or zero out — the flops accumulator on supported layers."""
    if not is_supported_instance(module):
        return
    module.__flops__ = 0


def add_flops_counter_hook_function(module):
    """Register the flops-counting forward hook that matches *module*'s type.

    Idempotent: if a hook handle is already stored on the module nothing
    is registered again. Unsupported layer types are skipped entirely.
    """
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        return  # already instrumented

    # Ordered (layer classes, hook) dispatch table; first match wins.
    hook_dispatch = (
        (torch.nn.Conv2d, conv_flops_counter_hook),
        ((torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
          torch.nn.LeakyReLU, torch.nn.ReLU6), relu_flops_counter_hook),
        (torch.nn.Linear, linear_flops_counter_hook),
        ((torch.nn.AvgPool2d, torch.nn.MaxPool2d,
          nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d), pool_flops_counter_hook),
        (torch.nn.BatchNorm2d, bn_flops_counter_hook),
        (torch.nn.Upsample, upsample_flops_counter_hook),
    )
    chosen_hook = empty_flops_counter_hook  # fallback: supported but unlisted
    for layer_classes, candidate in hook_dispatch:
        if isinstance(module, layer_classes):
            chosen_hook = candidate
            break
    module.__flops_handle__ = module.register_forward_hook(chosen_hook)


def remove_flops_counter_hook_function(module):
    """Detach and forget this module's flops-counting hook, if any."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__


# --- Masked flops counting


# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
    """Initialise (or clear) the flops-mask slot on supported layers.

    Also runs during counter initialisation, so an existing mask is reset.
    """
    if not is_supported_instance(module):
        return
    module.__mask__ = None
File renamed without changes.
@@ -61,7 +61,7 @@ def __init__(self, list_genome, channels, repeats=None):
if not self._genome:
self._model = Identity()

print(list_genome)
# print(list_genome)

def adjust_for_repeats(self, repeats):
"""
@@ -0,0 +1,4 @@
# NOTE(review): appears to be hand-picked macro-level genotypes from the
# NSGA-Net search — three entries, each a list of bit-lists of increasing
# length (looks like per-node binary connectivity encodings). Verify the
# exact encoding against the macro search/decoder code before relying on
# this description.
NSGANet = [[[1], [0, 0], [0, 1, 0], [0, 1, 1, 1], [1, 0, 0, 1, 1], [0]],
[[0], [0, 0], [0, 1, 0], [0, 1, 0, 1], [1, 1, 1, 1, 1], [0]],
[[0], [0, 1], [1, 0, 1], [1, 0, 1, 1], [1, 0, 0, 1, 1], [0]]]

@@ -1,5 +1,5 @@
from models.micro_operations import *
from validation.utils import drop_path
from misc.utils import drop_path


DEFAULT_PADDINGS = {

This file was deleted.

0 comments on commit 408ac80

Please sign in to comment.
You can’t perform that action at this time.