-
Notifications
You must be signed in to change notification settings - Fork 0
/
train.py
119 lines (101 loc) · 3.2 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import config
import tensorflow as tf
from tensorflow.keras import models, layers, Sequential
from tensorflow.keras.losses import SparseCategoricalCrossentropy as scce
import matplotlib.pyplot as plt
import numpy as np
import os
from utils import get_train_test_val_split
# Load the image dataset from disk into a batched tf.data.Dataset.
# Class labels are inferred from the sub-directory names and encoded as
# integer indices (matching SparseCategoricalCrossentropy below).
dataset = tf.keras.utils.image_dataset_from_directory(
    config.DATA_DIR,
    labels='inferred',
    label_mode='int',
    image_size=config.IMAGE_SIZE,
    batch_size=config.BATCH_SIZE,
    shuffle=True
)
# Partition the dataset into training, testing and validation subsets.
# NOTE(review): the attribute really is spelled TRIAN_SIZE in the project
# config module — the misspelled name must be kept for the lookup to resolve.
split_fractions = (config.TRIAN_SIZE, config.TEST_SIZE, config.VALIDATION_SIZE)
train_data, test_data, val_data = get_train_test_val_split(
    *split_fractions, dataset
)
# Performance pipeline for each split: keep decoded images cached in
# memory, shuffle with a 1000-element buffer, and overlap data prep with
# training by letting tf autotune the prefetch depth.
def _pipeline(ds):
    # cache -> shuffle -> prefetch, identical settings for every split
    return ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

train_data = _pipeline(train_data)
test_data = _pipeline(test_data)
val_data = _pipeline(val_data)
# END Enable cache and prefetching
# Preprocessing: resize every image to the configured size, then rescale
# pixel values from [0, 255] into [0, 1].
_preprocess_steps = [
    layers.experimental.preprocessing.Resizing(*config.IMAGE_SIZE),
    layers.experimental.preprocessing.Rescaling(1.0 / 255),
]
preprocess_layer = Sequential(_preprocess_steps)

# Data augmentation: random flips and small random rotations.
_augment_steps = [
    layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
    layers.experimental.preprocessing.RandomRotation(0.2),
]
data_augmentation_layer = Sequential(_augment_steps)
# CNN classifier: preprocessing + augmentation feed a stack of six
# Conv2D/MaxPooling blocks, flattened into two Dense layers.
# FIX: the original passed input_shape=(BATCH_SIZE, H, W, C) to the first
# Conv2D. input_shape must NOT include the batch dimension, and the kwarg
# was ignored anyway because Conv2D is not the model's first layer — the
# model gets its full shape explicitly via .build() below, so it is dropped.
potato_clsf = Sequential([
    preprocess_layer,
    data_augmentation_layer,
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    # 3 output classes; softmax emits probabilities, so the loss below
    # is configured with from_logits=False.
    layers.Dense(3, activation='softmax'),
])
# Build with the full batched input shape (batch, height, width, channels).
# FIX: the original used IMAGE_SIZE[0] for BOTH spatial dimensions; the
# width must come from IMAGE_SIZE[1] (only harmless when the image is square).
potato_clsf.build(input_shape=(
    config.BATCH_SIZE,
    config.IMAGE_SIZE[0],
    config.IMAGE_SIZE[1],
    config.CHANNELS
))
# Adam optimizer; sparse categorical cross-entropy because labels are
# integer class indices (label_mode='int' above).
# from_logits=False: the final Dense layer already applies softmax.
potato_clsf.compile(
    loss=scce(from_logits=False),
    optimizer='adam',
    metrics=['accuracy']
)
# Train the classifier, validating after each epoch.
# FIX: dropped the batch_size argument — train_data is a tf.data.Dataset
# that is already batched by image_dataset_from_directory, and Keras'
# fit() does not accept batch_size for dataset inputs.
history = potato_clsf.fit(
    train_data,
    validation_data=val_data,
    verbose=1,
    epochs=config.EPOCHS
)
def predict(model, img, class_names=None):
    """Classify a single image and report the model's confidence.

    Args:
        model: trained classifier (anything exposing a ``predict`` method).
        img: one image array of shape (height, width, channels).
        class_names: optional sequence mapping class index -> label.
            Defaults to the module-level ``dataset``'s class names for
            backward compatibility; pass it explicitly to avoid relying
            on that global.

    Returns:
        Tuple ``(predicted_class, confidence)`` where confidence is a
        percentage rounded to two decimal places.
    """
    if class_names is None:
        # Backward-compatible fallback to the dataset loaded at import time.
        class_names = dataset.class_names
    # Add a leading batch axis: (H, W, C) -> (1, H, W, C).
    # np.expand_dims is equivalent to tf.expand_dims here and keeps this
    # helper usable without TensorFlow in scope.
    img_array = np.expand_dims(img, 0)
    predictions = model.predict(img_array)
    scores = predictions[0]
    predicted_class = class_names[int(np.argmax(scores))]
    confidence = round(100 * float(np.max(scores)), 2)
    return predicted_class, confidence
# Persist the trained model under an auto-incrementing numeric version.
# FIX: the original `int(i) for i in os.listdir(...)` raised ValueError if
# ./models contained any non-numeric entry, and FileNotFoundError if the
# directory did not exist yet.
os.makedirs("./models", exist_ok=True)
_versions = [int(name) for name in os.listdir("./models") if name.isdigit()]
model_version = max(_versions, default=0) + 1
potato_clsf.save(f"./models/{model_version}")  # TF SavedModel directory
potato_clsf.save('./clsf.h5')                  # legacy HDF5 copy