# commandParser.py
from utils import logger
from PyInquirer import prompt
from config import datasets, models, fusions, evaluationMetrics
from config import defaultModel, defaultDataset, defaultFusion, defaultEvaluation, isInteractive

modelChoices = []
fusionChoices = []
datasetChoices = []
evaluatorChoices = []
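
# NOTE (assumption, not stated in this file): judging from how these
# objects are used below, `models` and `datasets` appear to be dicts
# mapping a name to the list of scopes it covers (getUserChoices()
# indexes them by name and compares the scope lists), while `fusions`
# and `evaluationMetrics` are plain iterables of names. A hypothetical
# config could look like:
#
#   models = {'ModelA': ['scope1', 'scope2']}
#   datasets = {'DatasetA': ['scope1', 'scope2', 'scope3']}
#   fusions = ['early', 'late']
#   evaluationMetrics = ['accuracy', 'f1']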


def initChoices():
    """
    Prepare the choice lists for the interactive questions

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Preparing model items
    for model in models:
        modelChoices.append(model)
    # Preparing dataset items
    for dataset in datasets:
        datasetChoices.append(dataset)
    # Preparing fusion items
    for fusion in fusions:
        fusionChoices.append(fusion)
    # Preparing evaluation metric items (checkbox choices are dicts with a 'name' key)
    for evaluator in evaluationMetrics:
        evaluatorChoices.append({'name': evaluator})


def interactiveCommandForm():
    """
    Generate the interactive form for the user to select the parameters

    Parameters
    ----------
    None

    Returns
    -------
    userInputs: dict
        Dictionary containing the user inputs
    """
    # Initiate choices
    initChoices()
    # Apply choices to the questions
    questions = [
        {
            'type': 'list',
            'name': 'Model',
            'message': 'Choose the model you need:',
            'choices': modelChoices
        },
        {
            'type': 'list',
            'name': 'Dataset',
            'message': 'Choose the dataset you need:',
            'choices': datasetChoices
        },
        {
            'type': 'list',
            'name': 'Fusion',
            'message': 'Choose the fusion you need:',
            'choices': fusionChoices
        },
        {
            'type': 'checkbox',
            'name': 'Evaluation',
            'message': 'Choose at least one evaluation metric:',
            'choices': evaluatorChoices
        },
        {
            'type': 'confirm',
            'name': 'Confirmation',
            'message': 'Do you confirm your selected choices?',
            'default': True,
        },
    ]
    # Showing the questions and collecting the user's answers
    userInputs = prompt(questions)
    return userInputs
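
# Note: prompt() returns the answers as a dict keyed by each question's
# 'name' field, so interactiveCommandForm() yields something like the
# hypothetical example below ('Evaluation' is a list because it comes
# from a checkbox question):
#
#   {'Model': 'ModelA', 'Dataset': 'DatasetA', 'Fusion': 'early',
#    'Evaluation': ['accuracy'], 'Confirmation': True}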


def getUserChoices():
    """
    Get the user inputs and validate them

    Parameters
    ----------
    None

    Returns
    -------
    userInputs: dict
        Dictionary containing the user inputs, or None if the choices
        are not confirmed or no evaluation metric is selected
    """
    if isInteractive:
        userInputs = interactiveCommandForm()
    else:
        userInputs = {
            'Model': defaultModel,
            'Dataset': defaultDataset,
            'Fusion': defaultFusion,
            'Evaluation': defaultEvaluation,
            'Confirmation': True,
            'Ignored': []
        }
    # Validating user inputs (.get guards against an aborted prompt,
    # which may return an incomplete answers dict)
    if not userInputs.get('Confirmation'):
        logger('The selected choices were not confirmed!', 'error')
        return
    print('Validating your choices ...')
    selectedModelScopes = models[userInputs['Model']]
    selectedDatasetScopes = datasets[userInputs['Dataset']]
    ignoredContexts = []
    # Checking if the dataset covers all scopes of the model
    isCovered = all(
        item in selectedDatasetScopes for item in selectedModelScopes)
    if not isCovered:
        difference = [
            item for item in selectedModelScopes if item not in selectedDatasetScopes]
        printMessage = f'Ignoring {difference} scope(s) of {userInputs["Model"]}, as not covered in {userInputs["Dataset"]}!'
        logger(printMessage, 'warn')
        ignoredContexts = difference
    # Checking that at least one evaluation metric is selected
    if len(userInputs['Evaluation']) == 0:
        logger('No evaluation metric has been selected!', 'error')
        return
    logger(f'User inputs: {userInputs}', 'info', True)
    userInputs['Ignored'] = ignoredContexts
    return userInputs
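

# A minimal usage sketch (hypothetical; in the actual pipeline this
# module is presumably imported and getUserChoices() is called by the
# entry point rather than run directly):
if __name__ == '__main__':
    choices = getUserChoices()
    if choices is not None:
        print(f"Selected: {choices['Model']} on {choices['Dataset']} "
              f"with {choices['Fusion']} fusion, evaluated by "
              f"{choices['Evaluation']} (ignored scopes: {choices['Ignored']})")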