run_this_benchmark_detection.py
if __name__ == "__main__":
    # from unittest import result
    import importlib
    from src.methods import *
    from mcsm_benchs.benchmark_utils import MethodTemplate as MethodTemplate
    import time
    import inspect

    # Collect the methods in the folder/module "methods" and make a global list of them.
    print('Collecting methods to benchmark...')
    modules = dir()
    modules = [mod_name for mod_name in modules if mod_name.startswith('method_')]
    global list_of_methods  # Use with caution.
    list_of_methods = list()

    for mod_name in modules:
        mod = importlib.import_module('src.methods.' + mod_name)
        classes_in_mod = inspect.getmembers(mod, inspect.isclass)
        for a_class in classes_in_mod:
            method_class = getattr(mod, a_class[0])
            class_parent = method_class.__bases__[0]
            if class_parent == MethodTemplate:
                method_name = method_class().id
                print(method_name)
                list_of_methods.append(method_class())

    from mcsm_benchs.Benchmark import Benchmark
    import numpy as np
    from mcsm_benchs.ResultsInterpreter import ResultsInterpreter
    import yaml
    import pickle
    import os

    dictionary_of_methods = dict()
    dictionary_of_parameters = dict()

    # Select only the methods whose task is detection.
    for method_instance in list_of_methods:
        if method_instance.task == 'detection':
            method_id = method_instance.id
            dictionary_of_methods[method_id] = method_instance.method
            dictionary_of_parameters[method_id] = method_instance.get_parameters()

    # Parameters of the benchmark:
    # Load parameters from the configuration file.
    with open("config_detection.yaml", "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    config['methods'] = dictionary_of_methods
    config['parameters'] = dictionary_of_parameters
    config['task'] = 'detection'

    if 'add_new_methods' in config.keys():
        if config['add_new_methods']:
            # Load a previously saved benchmark and add the new methods to it.
            filename = 'results/last_benchmark_detection'
            with open(filename + '.pkl', 'rb') as f:
                benchmark = pickle.load(f)
            benchmark.add_new_method(config['methods'], config['parameters'])
        else:
            config.pop('add_new_methods')
            benchmark = Benchmark(**config)
    else:
        benchmark = Benchmark(**config)

    start = time.time()
    my_results = benchmark.run_test()  # Run the test. my_results is a nested dictionary with the results for each variable of the simulation.
    end = time.time()
    print("The time of execution:", end - start)

    df = benchmark.get_results_as_df()  # Format the results as a DataFrame.
    print(df)

    # Save the benchmark to a file. Note that only the method ids are saved.
    benchmark.save_to_file(filename='results/last_benchmark_detection')

    # results_interpreter = ResultsInterpreter(benchmark)
    # results_interpreter.save_report(path='results')
    # results_interpreter.get_html_figures(path='results/detection/figures/html', bars=True)
    # results_interpreter.get_csv_files(path='results/detection/csv_files')
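
For reference, the collection loop above only picks up a method if it lives in a module named `method_*` under `src/methods` and directly subclasses `MethodTemplate`, exposing the `id`, `task`, `method` and `get_parameters()` attributes the script reads. The sketch below is a minimal, hypothetical example of such a module; the module name, the `NewDetectionMethod` class, the detector logic and the `(args, kwargs)` format returned by `get_parameters()` are illustrative assumptions, not part of this repository, and should follow whatever convention the existing `method_*` modules use.

# src/methods/method_new_detection.py -- hypothetical sketch, not part of the benchmark.
import numpy as np
from mcsm_benchs.benchmark_utils import MethodTemplate


def a_simple_detector(signal, threshold=3.0):
    # Illustrative placeholder: declare a detection when the peak amplitude
    # exceeds `threshold` times a crude noise-level estimate.
    noise_level = np.median(np.abs(signal)) + 1e-12
    return np.max(np.abs(signal)) > threshold * noise_level


class NewDetectionMethod(MethodTemplate):
    def __init__(self):
        self.id = 'new_detection_method'   # Name listed by the benchmark.
        self.task = 'detection'            # Must be 'detection' to pass the filter above.

    def method(self, signal, *args, **kwargs):
        return a_simple_detector(signal, *args, **kwargs)

    def get_parameters(self):
        # Assumed format: one (positional args, keyword args) pair per
        # parameter combination to benchmark.
        return [((3.0,), {}), ((5.0,), {})]

Dropping a module like this into `src/methods` is enough for the script to discover it; the corresponding entry then appears in `dictionary_of_methods` and `dictionary_of_parameters` and is benchmarked together with the other detection methods.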