-
Notifications
You must be signed in to change notification settings - Fork 2
/
algo_benchmark.py
148 lines (133 loc) · 4.64 KB
/
algo_benchmark.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
"""
Documentation:
to be removed
"""
from multiprocessing import Process
import argparse
import datetime
import time
import config
import main as algorithm
import eval
# Default CLI values: one process at a time, four runs per grid size.
DEFAULT_NB_PARALLEL_EXECUTIONS = 1
DEFAULT_NB_EXECUTIONS_PER_GRID_SIZE = 4
# (Process, input_grid) tuples for every prepared (not yet started) run.
processes_pool = []
# Indices into processes_pool of the processes currently running.
running_processes_pool = []
# Per-process start times; each entry is replaced by a
# (resolution_time, input_grid) tuple once the process ends.
resolution_time_records = []
def parse_arguments():
    """Parse the benchmark's command-line options.

    Returns:
        argparse.Namespace with ``nb_parallel_executions`` and
        ``nb_executions_per_grid_size`` integer attributes.
    """
    # Fixed typo in the user-facing description ("algortihm").
    parser = argparse.ArgumentParser(description='Eternity II algorithm benchmarking')
    parser.add_argument('--nb-parallel-executions', type=int,
                        default=DEFAULT_NB_PARALLEL_EXECUTIONS,
                        help='Number of parallel executions at the same time')
    parser.add_argument('--nb-executions-per-grid-size', type=int,
                        default=DEFAULT_NB_EXECUTIONS_PER_GRID_SIZE,
                        help='Number of algorithm executions per grid size')
    return parser.parse_args()
def prepare_grid_benchmark(input_grid, nb_executions):
    """Queue ``nb_executions`` solver processes for ``input_grid``.

    Processes are appended (not started) to the module-level
    ``processes_pool``, paired with the grid file they will solve.
    """
    solver_kwargs = {
        'old_pop': False,
        'timer': None,
        'nloop': config.NGEN,
        'timed': False,
        'input_grid': input_grid,
    }
    for _ in range(nb_executions):
        worker = Process(target=algorithm.main, args=(False,), kwargs=solver_kwargs)
        processes_pool.append((worker, input_grid))
def record_process_start():
    """Remember the start time of the process that was just launched."""
    start_time = datetime.datetime.now()
    resolution_time_records.append(start_time)
def record_process_end(process_index):
    """Finalize the timing record for a finished process.

    Replaces the stored start time at ``process_index`` with a
    ``(resolution_time, input_grid)`` tuple and prints a summary line.

    Args:
        process_index: index into ``processes_pool`` /
            ``resolution_time_records`` of the process that ended.
    """
    end_time = datetime.datetime.now()
    start_time = resolution_time_records[process_index]
    resolution_time = end_time - start_time
    input_grid = processes_pool[process_index][1]
    resolution_time_records[process_index] = (resolution_time, input_grid)
    # print() function replaces the Python-2-only print statement so the
    # file runs on Python 3; output is identical (space-separated fields).
    print(process_index, "\t|", input_grid, "\t| resolution time:", resolution_time)
def find_ended_processes(nb_current_executions):
    """Reap finished processes and return the updated running count.

    For each index in ``running_processes_pool`` whose process is no
    longer alive, records its end time and removes it from the pool.

    Args:
        nb_current_executions: number of processes believed running.

    Returns:
        The running count, decremented once per reaped process.
    """
    # Iterate over a snapshot: the original looped over
    # running_processes_pool while calling .remove() on it, which makes
    # the list iterator skip the element following each removal, so some
    # finished processes were only reaped on a later pass.
    for process_index in list(running_processes_pool):
        if not processes_pool[process_index][0].is_alive():
            record_process_end(process_index)
            running_processes_pool.remove(process_index)
            nb_current_executions -= 1
    return nb_current_executions
def launch_benchmark(nb_executions):
    """Start every prepared process, at most ``nb_executions`` at a time.

    Blocks until all processes in ``processes_pool`` have finished.
    """
    nb_running = 0
    for pool_index, (process, _input_grid) in enumerate(processes_pool):
        # Throttle: poll until a slot frees up before starting the next run.
        while nb_running == nb_executions:
            nb_running = find_ended_processes(nb_running)
            time.sleep(0.01)
        eval.init_virgin_scores_list()
        running_processes_pool.append(pool_index)
        process.start()
        record_process_start()
        nb_running += 1
    # Drain: wait for the remaining processes to complete.
    while nb_running != 0:
        nb_running = find_ended_processes(nb_running)
        time.sleep(0.01)
def compute_stats_from_records(records=None):
    """Print min/avg/max resolution time per input grid.

    Args:
        records: optional list of ``(resolution_time, input_grid)``
            tuples; defaults to the module-level
            ``resolution_time_records`` filled during the benchmark.

    Returns:
        A list of ``(input_grid, min, avg, max)`` tuples, one per grid,
        in first-seen order (also printed, one line per grid).
    """
    if records is None:
        records = resolution_time_records
    # Preserve first-seen order of the grids (a set would lose it).
    input_grids = []
    for _, input_grid in records:
        if input_grid not in input_grids:
            input_grids.append(input_grid)
    stats = []
    for input_grid in input_grids:
        grid_times = [t for t, grid in records if grid == input_grid]
        # Builtin min()/max() replace the hand-rolled scan, which also
        # shadowed the builtins by naming its locals `min` and `max`.
        fastest = min(grid_times)
        slowest = max(grid_times)
        average = sum(grid_times, datetime.timedelta(0)) / len(grid_times)
        stats.append((input_grid, fastest, average, slowest))
        print(input_grid, "\t| min:", fastest, "avg:", average, "max:", slowest)
    return stats
def main(args):
    """Run the full benchmark: prepare grids, execute, report stats.

    Args:
        args: argparse.Namespace from parse_arguments().
    """
    # print() function replaces the Python-2-only print statement so the
    # file runs on Python 3.
    print("nb_parallel_executions", args.nb_parallel_executions)
    print("nb_executions_per_grid_size", args.nb_executions_per_grid_size)
    prepare_grid_benchmark("test_4pieces.txt", args.nb_executions_per_grid_size)
    # our algorithm currently does not solve 3x3 puzzle
    prepare_grid_benchmark("test_9pieces.txt", args.nb_executions_per_grid_size)
    # prepare_grid_benchmark("test_16pieces.txt", args.nb_executions_per_grid_size)
    print("\nlaunching benchmark...")
    launch_benchmark(args.nb_parallel_executions)
    print("\ncomputing stats from benchmark records...")
    compute_stats_from_records()
# Public API of this module under star-import.
__all__ = [
    "main"
]
# NOTE(review): `__md__` is not a standard Python dunder — presumably
# project-specific metadata; confirm it is actually consumed somewhere.
__md__ = [
    "main"
]
if __name__ == '__main__':
    # Script entry point: parse CLI options and run the benchmark.
    cli_args = parse_arguments()
    main(cli_args)