-
Notifications
You must be signed in to change notification settings - Fork 28
/
process_results
executable file
·128 lines (97 loc) · 5.71 KB
/
process_results
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
#!/usr/bin/env python3
"""
Runs several calibration executions in batch_execution. A yml file is used to config the batch executions.
"""
import argparse
import glob
import os
import shutil
from colorama import Fore, Style
from atom_core.system import resolvePath
import pandas as pd
def averageCsvFiles(filenames, average_row, column_to_check):
    """Average the numeric cells of the summary rows across several CSV files.

    From each file, only the row(s) whose *column_to_check* cell equals
    *average_row* are kept. All cells are then coerced to numeric (text cells
    become NaN and are skipped by mean()), the rows from all files are
    concatenated, and a per-column mean is computed.

    :param filenames: list of paths to CSV files with identical columns.
    :param average_row: value identifying the summary row (e.g. 'Averages').
    :param column_to_check: name of the column holding that identifier.
    :return: a pandas Series with the per-column averages; text-only columns
             (which averaged to NaN) are filled with the average_row label.
    """
    summary_dfs = []
    for filename in filenames:
        df = pd.read_csv(filename)
        # Keep only the summary row(s) to be averaged.
        df = df[df[column_to_check] == average_row]
        # Coerce everything to numeric; text cells become NaN so mean() skips them.
        df = df.apply(pd.to_numeric, errors="coerce")
        summary_dfs.append(df)

    # Concatenate all summary rows and average each column.
    averaged = pd.concat(summary_dfs).mean()

    # Text columns averaged to NaN; restore the configured label there
    # (was hard-coded 'Averages', now consistent with the parameter).
    return averaged.fillna(average_row)
def main():
    """Average per-experiment CSV results across multiple runs/folds.

    Scans --results_folder for run/fold subfolders, groups them into
    experiments by stripping the run/fold suffixes, averages every CSV file
    found in a run folder across all runs of each experiment, and writes the
    averaged CSVs to --output_folder/<experiment>/.
    """
    ap = argparse.ArgumentParser()  # Parse command line arguments
    ap.add_argument("-v", "--verbose", help="Prints the stdout_data of each command to the terminal.",
                    action='store_true', default=False)
    ap.add_argument("-ow", "--overwrite", help="Overwrites output folder.",
                    action='store_true', default=False)
    ap.add_argument("-rf", "--results_folder", help="Folder containing the results",
                    required=True, type=str)
    ap.add_argument("-of", "--output_folder", help="Folder where to store the processed results",
                    required=True, type=str)
    ap.add_argument("-rs", "--run_suffix", help="Suffix used to signal multiple runs of the same experiment.",
                    required=False, default='_run', type=str)
    ap.add_argument("-fs", "--fold_suffix", help="Suffix used to signal multiple folds of the same run.",
                    required=False, default='_fold', type=str)
    ap.add_argument("-ar", "--average_row", help="Name of the row to average. Default is 'Averages'",
                    required=False, default='Averages', type=str)
    ap.add_argument("-ctc", "--column_to_check", help="Name of the column to check for the average row. Default is 'Collection #', which is the case for ATOM result processing.",
                    required=False, default='Collection #', type=str)
    args = vars(ap.parse_args())

    # Create (or overwrite) the top-level output folder.
    args['output_folder'] = resolvePath(args['output_folder'])
    if not os.path.exists(args['output_folder']):
        os.mkdir(args['output_folder'])
    elif args['overwrite']:
        shutil.rmtree(args['output_folder'])
        os.mkdir(args['output_folder'])

    print('Average row: ' + args['average_row'])

    # An experiment is a set of one or more runs, with folder names like
    # <experiment_name><run_suffix>001 (and optionally <fold_suffix>001).
    files_and_folders = os.listdir(args['results_folder'])
    folders = [x for x in files_and_folders if os.path.isdir(args['results_folder'] + '/' + x)]
    if not folders:  # avoid IndexError on folders[0] below
        raise FileNotFoundError('No result folders found in ' + args['results_folder'])

    # Each suffix is complemented by a 3-digit run number, as in _run001.
    fold_suffix_size = len(args['fold_suffix']) + 3
    run_suffix_size = len(args['run_suffix']) + 3
    suffix_size = 0
    if args['fold_suffix'] in folders[0]:
        suffix_size += fold_suffix_size
    if args['run_suffix'] in folders[0]:
        suffix_size += run_suffix_size

    # Strip the suffixes to get unique experiment names. Guard against
    # suffix_size == 0: x[:-0] is '' and would collapse all names.
    if suffix_size:
        experiments = list(set([x[:-suffix_size] for x in folders]))
    else:
        experiments = list(set(folders))

    # Process every csv file found in the first run folder
    # (e.g. comparison_to_ground_truth_transforms.csv, single_rgb_evaluation.csv, ...).
    filenames = os.listdir(os.path.abspath(args['results_folder']) + '/' + folders[0])
    files_to_process = [filename for filename in filenames if filename.endswith('.csv')]
    print(files_to_process)

    for experiment in experiments:
        print('Averaging experiment ' + Fore.BLUE + experiment + Style.RESET_ALL)
        experiment_folder = args['output_folder'] + '/' + experiment
        if not os.path.exists(experiment_folder):
            os.mkdir(experiment_folder)
        elif args['overwrite']:
            shutil.rmtree(experiment_folder)
            os.mkdir(experiment_folder)

        # All runs (and folds) belonging to this experiment, in sorted order.
        files_and_folders = glob.glob(args['results_folder'] + '/' + experiment + '*')
        files_and_folders.sort()

        for file_to_process in files_to_process:
            filenames = [run + '/' + file_to_process for run in files_and_folders]
            average_df = averageCsvFiles(filenames=filenames,
                                         average_row=args['average_row'],
                                         column_to_check=args['column_to_check'])
            # averageCsvFiles returns a pandas Series; Series.drop has no
            # DataFrame-style columns= kwarg (it raises ValueError), so drop
            # the check column by label instead.
            average_df = average_df.drop(labels=[args['column_to_check']], errors='ignore')
            output_file = args['output_folder'] + '/' + experiment + '/' + file_to_process
            print('Saving average to ' + Fore.BLUE + output_file + Style.RESET_ALL)
            average_df.to_csv(output_file)
if __name__ == "__main__":
main()