uf_balanced.py (forked from andresantonioriveros/pyRF)
# coding=utf-8
# -------------------------------------------------------------------------------------------------
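# Trains one decision tree per balanced sample of the chosen catalog and
# aggregates the per-sample prediction tables into a single result, reporting
# its weighted F-score.
#
# Example invocation (argument values are illustrative only):
#   python uf_balanced.py --percentage 100 --n_samples 10 --catalog MACHO \
#       --sets_path sets/MACHO/ --result_path results/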
from __future__ import print_function

import argparse
import sys

import pandas as pd
from sklearn.tree import DecisionTreeClassifier

import metrics
import utils
if __name__ == '__main__':
    # Read the parameters from the command line
    print(' '.join(sys.argv))

    parser = argparse.ArgumentParser()
    parser.add_argument('--percentage', required=True, type=str)
    parser.add_argument('--n_samples', required=True, type=int)
    parser.add_argument('--catalog', default='MACHO', choices=['MACHO', 'EROS', 'OGLE'])
    # Tree hyperparameters: max_depth=None lets the tree grow fully;
    # min_samples_split defaults to 2, matching scikit-learn's own default
    parser.add_argument('--max_depth', required=False, type=int)
    parser.add_argument('--min_samples_split', required=False, type=int, default=2)
    parser.add_argument('--sets_path', required=True, type=str)
    parser.add_argument('--result_path', required=True, type=str)
    parser.add_argument('--train_index_filter', required=False, type=str)
    parser.add_argument('--test_index_filter', required=False, type=str)
    parser.add_argument('--feature_filter', nargs='*', type=str)

    args = parser.parse_args()
    percentage = args.percentage
    n_samples = args.n_samples
    catalog = args.catalog
    max_depth = args.max_depth
    min_samples_split = args.min_samples_split
    sets_path = args.sets_path
    result_path = args.result_path
    train_index_filter = args.train_index_filter
    test_index_filter = args.test_index_filter
    feature_filter = args.feature_filter

    # The index filters are CSV files whose index column lists the object ids
    # to keep in the train and test sets
    if train_index_filter is not None:
        train_index_filter = pd.read_csv(train_index_filter, index_col=0).index
    if test_index_filter is not None:
        test_index_filter = pd.read_csv(test_index_filter, index_col=0).index
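
    # One balanced sample file per iteration, named
    # <sets_path><catalog>_sampled_<i>.csv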
    paths = [sets_path + catalog + '_sampled_' + str(i) + '.csv' for i in range(n_samples)]

    resultados = []
    for p in paths:
        data = pd.read_csv(p, index_col=0)
        train_X, train_y = utils.filter_data(data, index_filter=train_index_filter,
                                             feature_filter=feature_filter)
        test_X, test_y = utils.filter_data(data, index_filter=test_index_filter,
                                           feature_filter=feature_filter)

        clf = DecisionTreeClassifier(criterion='entropy', max_depth=max_depth,
                                     min_samples_split=min_samples_split)
        clf.fit(train_X, train_y)
        resultados.append(metrics.predict_table(clf, test_X, test_y))
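
    # Aggregate the per-sample prediction tables, persist the result and
    # report its weighted F-score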
    result = metrics.aggregate_predictions(resultados)
    result.to_csv(result_path + 'result_' + percentage + '.csv')
    print(metrics.weighted_f_score(metrics.confusion_matrix(result)))