/
cifar10.py
126 lines (101 loc) · 4.46 KB
/
cifar10.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import argparse
import os
from typing import Any, Optional
from tqdm import tqdm
# Mute Tensorflow logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf # noqa
from tensorflow import keras # noqa
from evolution.encoding.base import BatchNorm # noqa
from evolution.encoding.base import Dense # noqa
from evolution.encoding.base import DepthwiseConv2D # noqa
from evolution.encoding.base import Dropout # noqa
from evolution.encoding.base import Flatten # noqa
from evolution.encoding.base import IdentityOperation # noqa
from evolution.encoding.base import MaxPool2D # noqa
from evolution.encoding.base import PointConv2D # noqa
from evolution.encoding.base import ReLU # noqa
from evolution.encoding.base import SeparableConv2D # noqa
from evolution.encoding.base import Vertex # noqa
from evolution.encoding.fixed_edge import FixedEdge # noqa
from evolution.encoding.mutable_edge import MutableEdge # noqa
from evolution.evolve.evolve_strategy import AgingEvolution # noqa
from evolution.evolve.mutation_strategy import MutateOneLayer # noqa
from evolution.train.trainer import ParallelTrainer # noqa
# Training hyperparameters shared by every evaluated architecture.
batch_size = 32
num_classes = 10  # CIFAR-10 has 10 target classes
epochs = 20
# Downloads/caches CIFAR-10 on first run (network I/O at import time).
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# One-hot encode integer labels to match categorical_crossentropy below.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
class TopLayer(FixedEdge):
    """Fixed outer architecture for the evolved CIFAR-10 network.

    Two evolvable convolutional edges (separated by a max-pool) feed a fixed
    classification head: flatten -> dense(512) -> relu -> dropout -> dense.
    """

    def __init__(self) -> None:
        super().__init__(name='TopLayer')

    def construct_new_instance(self) -> 'FixedEdge':
        # Fresh, independent copy for the evolution framework to mutate.
        return TopLayer()

    def build_graph(self) -> None:
        # Candidate operations the search may place on each mutable edge.
        candidate_ops = (BatchNorm(),
                         PointConv2D((20, 40)), DepthwiseConv2D(),
                         IdentityOperation(),
                         SeparableConv2D((20, 40)), Dropout(0.25),
                         ReLU())
        first_conv = MutableEdge(candidate_ops, max_vertices=10,
                                 initialize_with_identity=False)
        # Second evolvable edge starts as a copy but mutates independently.
        second_conv = first_conv.deep_copy()

        v1, v2, v3, v4, v5, v6, v7 = (
            Vertex(name=label)
            for label in ('V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7'))

        # Wire the graph: input -> conv -> pool -> conv -> classifier head.
        self.input_vertex.add_edge(first_conv, v1)
        v1.add_edge(MaxPool2D(), v7)
        v7.add_edge(second_conv, v2)
        v2.add_edge(Flatten(), v3)
        v3.add_edge(Dense(512), v4)
        v4.add_edge(ReLU(), v5)
        v5.add_edge(Dropout(0.5), v6)
        v6.add_edge(Dense(num_classes), self.output_vertex)

    def build(self, x: tf.Tensor) -> tf.Tensor:
        # The graph produces raw logits; convert to class probabilities.
        return keras.layers.Activation('softmax')(super().build(x))
class Cifar10ParallelTrainer(ParallelTrainer):
    """ParallelTrainer that supplies the RMSprop optimizer used for CIFAR-10.

    The redundant ``__init__`` override (which only forwarded ``**kwargs`` to
    ``super().__init__``) has been removed; the inherited constructor is used
    directly, so the caller-facing interface is unchanged.
    """

    def optimizer_factory(self) -> keras.optimizers.Optimizer:
        """Return a fresh RMSprop optimizer for each training run.

        NOTE(review): ``lr``/``decay`` are legacy keyword names; recent Keras
        releases rename ``lr`` to ``learning_rate`` and drop ``decay``
        entirely — confirm the pinned TensorFlow version before upgrading.
        """
        return keras.optimizers.RMSprop(lr=1e-4, decay=1e-6)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Example for evolving a neural net on cifar10 dataset')
    parser.add_argument('-p', type=int, default=20, help='Population size')
    parser.add_argument('-i', type=int, default=30,
                        help='Number of evolution iterations')
    parser.add_argument('-s', type=int, default=5,
                        help='Sample how many individuals in each iteration')
    parser.add_argument('-o', type=str, required=True,
                        help='Log directory path')
    # No try/except needed: parse_args() handles argparse.ArgumentError
    # internally, printing usage and exiting via SystemExit on bad input,
    # so the previous handler was unreachable dead code.
    args = parser.parse_args()

    # Cross-validated parallel training configuration applied to every
    # candidate architecture the evolution evaluates.
    train_eval_args = {
        'k_folds': 3,
        'num_process': 3,
        'x_train': x_train,
        'y_train': y_train,
        'x_valid': x_test,
        'y_valid': y_test,
        'fit_args': {'batch_size': batch_size,
                     'epochs': epochs,
                     'shuffle': True,
                     'verbose': 0},
        'loss': 'categorical_crossentropy',
        'metrics': 'accuracy',
        'log_dir': args.o}

    # Aging evolution: population size -p, -i iterations, sampling -s
    # individuals per iteration, mutating one layer at a time.
    aging_evolution = AgingEvolution(args.p, args.i, args.s, TopLayer(),
                                     MutateOneLayer(),
                                     Cifar10ParallelTrainer(**train_eval_args))
    model, performance = aging_evolution.run()
    tqdm.write('Best model performance: %.03f' % performance)