-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_validator.py
116 lines (82 loc) · 4.61 KB
/
run_validator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
import matplotlib as mpl
mpl.use('agg')
import os
import pandas as pd
import matplotlib.pyplot as plt
from itertools import product
from catalog import Source, Binary, Catalog, Pink
from experiment_options import BINARY_OPTS, PINK_OPTS, LEARNING_MODES
def FIRST_Fraction(CHANNELS=None,
                   PROJECT_DIR='Script_Experiments_Fraction_Learning',
                   TRIAL=0):
    """Validate previously trained PINK self-organising maps.

    For every combination of binary options, PINK options and channel set,
    locate the experiment directory, load its saved ``trained.pink``
    instance (if one exists) and run a series of validator passes
    (weighted/unweighted, Monte-Carlo/single-pass, probabilistic).  The
    packed results are written to ``Weighted_Results.json`` inside the
    Pink instance's project directory.

    Parameters
    ----------
    CHANNELS : list of list of str, optional
        Channel combinations to validate.  Defaults to ``[['FIRST']]``.
        A ``None`` sentinel is used to avoid a shared mutable default.
    PROJECT_DIR : str, optional
        Root directory containing the per-experiment output folders.
    TRIAL : int, optional
        Trial number used to build each experiment directory name.
    """
    if CHANNELS is None:
        CHANNELS = [['FIRST']]

    for bin_opts, pink_opts, channels in product(BINARY_OPTS, PINK_OPTS, CHANNELS):
        # A convex-hull mask needs more than one channel to act on.
        # NOTE(review): the original tested CHANNELS[0]; `channels` is the
        # set actually used this iteration, so test that instead.
        if len(channels) == 1 and bin_opts['convex']:
            print(f'Skipping this option set. CHANNELS is {channels}, nothing to apply convex hull masking to. ')
            continue

        chan_name = '_'.join(channels)
        out_dir = f"{PROJECT_DIR}/{chan_name}_{bin_opts['project_dir']}_{pink_opts['som-width']}x{pink_opts['som-height']}_Trial{TRIAL}"

        results = []
        if not os.path.exists(f'{out_dir}/trained.pink'):
            print(f'PINK not found: {out_dir}')
        else:
            print(f'\n\nLoading in saved Pink instance: {out_dir}')
            pink = Pink.loader(f'{out_dir}/trained.pink')

            for i, _ in enumerate(pink.binary):
                print('\tRunning Validator')
                # Monte-Carlo weight estimation for this SOM before validating.
                pink.weight_test(SOM_mode=i, realisations=100)

                # Weighted Monte-Carlo validation.
                result = pink.validator(SOM_mode=i, realisations=100, weights=True, pack=True)
                print('\t', result['total_accuracy'])
                results.append(result)

                # Unweighted Monte-Carlo validation.
                result = pink.validator(SOM_mode=i, realisations=100, pack=True)
                print('\t', result['total_accuracy'])
                results.append(result)

                # Single-pass (no realisations) validation.
                result = pink.validator(SOM_mode=i, pack=True)
                print('\t', result['total_accuracy'], result['total_correct']+result['total_wrong'])
                results.append(result)

                # Probabilistic weighted Monte-Carlo validation.
                result = pink.prob_validator(SOM_mode=i, realisations=100, weights=True, pack=True)
                print('\t', result['total_accuracy'], result['total_correct']+result['total_wrong'])
                results.append(result)

            df = pd.DataFrame(results)
            df.to_json(f'{pink.project_dir}/Weighted_Results.json')

        # Release any figures the validators may have opened (agg backend).
        plt.close('all')
if __name__ == '__main__':
    import socket

    # Hostname lookup: presumably used for host-specific batching in the
    # full script — TODO confirm; it is unused in this chunk.
    hostname = socket.gethostname()

    FRAC_DIR = 'Script_Experiments_Fractions_Trials_Learning'
    # ----------------------------------------------------------
    # Run ten trials each for the single-channel (FIRST only) and
    # two-channel (FIRST + WISE W1) configurations.
    for trial in range(10):
        FIRST_Fraction(TRIAL=trial, PROJECT_DIR=FRAC_DIR)
        FIRST_Fraction(CHANNELS=[['FIRST', 'WISE_W1']],
                       TRIAL=trial, PROJECT_DIR=FRAC_DIR)