# Predict.py
import pickle

import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- enables projection='3d' below
from sklearn import linear_model, metrics, neighbors
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# Load the pickled dataset; it is expected to expose .data and .target,
# in the style of sklearn's Bunch datasets. Pickles must be opened in
# binary mode.
with open('/home/elin/Programming/git/Prediction/elastic_api/Xy.pkl', 'rb') as f:
    dataset = pickle.load(f)
X = dataset.data
y = dataset.target
# Three candidate regressors, each evaluated with out-of-fold predictions
# from 10-fold cross-validation (the commented lines are the in-sample
# fit-and-predict alternatives).
model_lr = linear_model.LinearRegression()
# pred_lr = model_lr.fit(X, y).predict(X)
pred_lr = cross_val_predict(model_lr, X, y, cv=10)

model_nn = neighbors.KNeighborsRegressor(5, weights='distance')
# pred_nn = model_nn.fit(X, y).predict(X)
pred_nn = cross_val_predict(model_nn, X, y, cv=10)

model_poly = make_pipeline(PolynomialFeatures(3), Ridge())
# pred_poly = model_poly.fit(X, y).predict(X)
pred_poly = cross_val_predict(model_poly, X, y, cv=10)
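
# Optional: summarize each model's fit with a cross-validated R^2 score.
# A minimal sketch added here, not part of the original script; it only
# reuses the out-of-fold predictions above and sklearn.metrics.r2_score.
for name, pred in [('linear', pred_lr),
                   ('knn', pred_nn),
                   ('poly+ridge', pred_poly)]:
    print('%-10s R^2 = %.3f' % (name, metrics.r2_score(y, pred)))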
# Predicted vs. measured: points on the dashed diagonal are perfect predictions.
fig, ax = plt.subplots()
ax.scatter(y, pred_lr, c='g', label='Linear regression')
ax.scatter(y, pred_poly, c='b', label='Polynomial (deg 3) + Ridge')
ax.scatter(y, pred_nn, c='y', label='k-NN (k=5, distance-weighted)')
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
ax.legend(loc='best')
plt.show()
'''
# Disabled: ElasticNet experiment with a manual train/test split and a
# sweep over alpha to pick the regularization strength.
X_train = dataset.data[24:]
X_test = dataset.data[:24]
y_train = dataset.target[24:]
y_test = dataset.target[:24]
print(X_train)
print("....")
print(X_test)

alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
    enet.set_params(alpha=alpha)
    enet.fit(X_train, y_train)
    train_errors.append(enet.score(X_train, y_train))
    test_errors.append(enet.score(X_test, y_test))

i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)

# Plot train/test R^2 across the alpha grid.
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.xlabel('Regularization parameter')
plt.ylabel('Performance (R^2 score)')
plt.show()
'''
'''
# Disabled: 3D view of the first two features against the target, with the
# cross-validated polynomial predictions drawn as a surface.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:, 0], X[:, 1], y)
# plot_wireframe requires 2D grid arrays; plot_trisurf accepts the 1D columns here.
ax.plot_trisurf(X[:, 0], X[:, 1], pred_poly)
ax.set_xlabel('TTT')
ax.set_ylabel('count')
ax.set_zlabel('TTT+2')
plt.show()
'''