-
Notifications
You must be signed in to change notification settings - Fork 0
/
keras32_split4_Dense.py
109 lines (92 loc) · 3.46 KB
/
keras32_split4_Dense.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
# 과제 및 실습
# DNN으로 구성
# 전처리, early_stopping, MinMax
# 데이터 1~ 100 / 5개씩 잘라라
# x y
# 1, 2, 3, 4, 5 6
# ...
# 95 , 96, 97, 98, 99 100
# predict를 만들것
# 96, 97, 98, 99, 100 -> 101
# ...
#100, 101, 102, 103, 104, 105
#예상 predict는 (101, 102, 103, 104, 105)
import numpy as np
# 1. Data: source sequence 1..100 for training windows, 96..105 for prediction windows.
a = np.arange(1, 101)   # 1 .. 100 inclusive
b = np.arange(96, 106)  # 96 .. 105 inclusive
size = 6                # window length: 5 input steps + 1 target step
def split_x(seq, size):
    """Return all sliding windows of length *size* over *seq* as a 2-D array.

    Parameters
    ----------
    seq : sequence (sliceable, e.g. 1-D ndarray)
        Source sequence to window.
    size : int
        Window length; produces ``len(seq) - size + 1`` rows.

    Returns
    -------
    numpy.ndarray of shape ``(len(seq) - size + 1, size)``.
    """
    # FIX: the original printed type(aaa) on every loop iteration — leftover
    # debug output repeated len(seq)-size+1 times; removed.
    windows = [seq[i:i + size] for i in range(len(seq) - size + 1)]
    return np.array(windows)
# Build training windows from 1..100: each row is 6 consecutive values,
# first 5 are the input, last 1 is the target (so x: (95, 5), y: (95,)).
dataset = split_x(a, size)
x, y = dataset[:, :-1], dataset[:, -1]

# Prediction windows from 96..105 (split_x already returns an ndarray).
dataset_pred = split_x(b, 6)
x_pred = dataset_pred[:, :-1]  # first 5 columns of each row
y_pred = dataset_pred[:, -1]   # last column of each row
print(x_pred)
print(y_pred)
# 1.1 Preprocessing: train/test/val split then MinMax scaling fitted on the
# training split only (avoids leaking test statistics into the scaler).
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, test_size = 0.2,shuffle = True, random_state = 50)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size = 0.8, test_size = 0.2, shuffle = True, random_state = 50)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
# BUG FIX: the original assigned the transformed train set to a misspelled
# name (`x_tranin`), so x_train was never actually scaled even though
# x_test and x_val were — the model trained on raw values but was
# evaluated on scaled ones.
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)
#x_pred = scaler.transform(x_pred)
# NOTE(review): x_pred is fed to the model UNscaled (line above is commented
# out) — confirm that is intended, it is inconsistent with the scaled inputs.
print(x_train.shape) #(60,5)
print(x_test.shape) #(19,5)
print(x_val.shape) #(16,5)
print(x_pred.shape) #(5,5)
# Add a trailing feature axis: (n, 5) -> (n, 5, 1).
# NOTE(review): the Dense model below declares Input(shape=(5,)); this 3-D
# reshape looks left over from the LSTM version of this exercise — confirm
# it is still wanted for the DNN.
x_train = x_train.reshape(x_train.shape[0],x_train.shape[1],1)
x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],1)
x_val = x_val.reshape(x_val.shape[0],x_val.shape[1],1)
x_pred = x_pred.reshape(x_pred.shape[0],x_pred.shape[1],1)
# 2. Model: a plain feed-forward (Dense) network built with the functional API.
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, LSTM
inputs = Input(shape = (5,))
h = Dense(100, activation = 'relu', )(inputs)
h = Dense(70, activation = 'linear')(h)
h = Dense(60, activation = 'linear')(h)
h = Dense(50, activation = 'linear')(h)
h = Dense(50, activation = 'linear')(h)
outputs = Dense(1)(h)
model = Model(inputs = inputs, outputs = outputs)
model.summary()
# 3. Compile and train with early stopping on the training loss.
from tensorflow.keras.callbacks import EarlyStopping
model.compile(loss = 'mse', optimizer = 'adam', metrics = ['acc'])
stopper = EarlyStopping(monitor = 'loss', patience = 30, mode = 'auto')
model.fit(x_train, y_train, epochs = 1000, batch_size = 5,
          validation_data = (x_val, y_val), callbacks = [stopper])
# 4. Evaluate and predict the held-out windows.
# NOTE(review): this evaluates on the TRAINING split, not x_test — confirm.
loss = model.evaluate(x_train, y_train)
print("loss : ", loss)
result = model.predict(x_pred)
print('result : ', result)
# loss : [0.00027045546448789537, 0.0] LSTM earlystopping 223 // patience 30
# result : [[101.0326 ]
# [102.034744]
# [103.03702 ]
# [104.03941 ]
# [105.04197 ]]
# loss : [0.0009403941803611815, 0.0] DNN earlystopping 83 // patience 30
# result : [[101.0572 ]
# [102.05797]
# [103.05871]
# [104.05948]
# [105.06023]]