For adding the model part #1

Open · wants to merge 3 commits into base: master
191 changes: 191 additions & 0 deletions Data_Generator/helmet.py
@@ -0,0 +1,191 @@
import os
import sys
import random
import argparse
import numpy as np
from PIL import Image, ImageFile

__version__ = '0.3.0'


IMAGE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'images')
IMAGE1_PATH = os.path.join(IMAGE_DIR, 'helmet1.jpg')
IMAGE2_PATH = os.path.join(IMAGE_DIR, 'helmet2.jpg')
IMAGE3_PATH = os.path.join(IMAGE_DIR, 'helmet3.jpg')
IMAGE4_PATH = os.path.join(IMAGE_DIR, 'helmet4.jpg')
IMAGE5_PATH = os.path.join(IMAGE_DIR, 'helmet5.jpg')


def cli():
parser = argparse.ArgumentParser(description='Wear a face helmet in the given picture.')
parser.add_argument('pic_path', help='Picture path.')
parser.add_argument('--show', action='store_true', help='Whether show picture with helmet or not.')
parser.add_argument('--model', default='hog', choices=['hog', 'cnn'], help='Which face detection model to use.')
group = parser.add_mutually_exclusive_group()
group.add_argument('--one', action='store_true', help='Wear helmet1')
group.add_argument('--two', action='store_true', help='Wear helmet2')
group.add_argument('--three', action='store_true', help='Wear helmet3')
group.add_argument('--four', action='store_true', help='Wear helmet4')
group.add_argument('--five', action='store_true', help='Wear helmet5')
args = parser.parse_args()

pic_path = args.pic_path
if not os.path.exists(args.pic_path):
        print(f'Picture {pic_path} does not exist.')
sys.exit(1)

if args.one:
helmet_path = IMAGE1_PATH
elif args.two:
helmet_path = IMAGE2_PATH
elif args.three:
helmet_path = IMAGE3_PATH
elif args.four:
helmet_path = IMAGE4_PATH
else:
helmet_path = IMAGE5_PATH

FaceMasker(pic_path, helmet_path, args.show, args.model).helmet()


def create_helmet(image_path):
pic_path = image_path
helmet_path = os.path.join(IMAGE_DIR, 'helmet6.png')
show = False
model = "hog"
FaceMasker(pic_path, helmet_path, show, model).helmet()



class FaceMasker:
KEY_FACIAL_FEATURES = ('left_eye', 'right_eye')

def __init__(self, face_path, helmet_path, show=False, model='hog'):
self.face_path = face_path
self.helmet_path = helmet_path
self.show = show
self.model = model
        self._face_img: Image.Image = None
        self._helmet_img: Image.Image = None

def helmet(self):
import face_recognition

face_image_np = face_recognition.load_image_file(self.face_path)
face_locations = face_recognition.face_locations(face_image_np, model=self.model)
#face_landmarks = face_recognition.face_landmarks(face_image_np, face_locations)
self._face_img = Image.fromarray(face_image_np)
self._helmet_img = Image.open(self.helmet_path)

found_face = False
for face_location in face_locations:
            # landmarks for the current face only
            face_landmarks = face_recognition.face_landmarks(face_image_np, [face_location])
# check whether facial features meet requirement
for face_landmark in face_landmarks:
skip = False
for facial_feature in self.KEY_FACIAL_FEATURES:
if facial_feature not in face_landmark:
skip = True
break
if skip:
continue

# helmet face
found_face = True
self._helmet_face(face_landmark, face_location)

if found_face:
if self.show:
self._face_img.show()

# save
self._save()
else:
print('Found no face.')

def _helmet_face(self, face_landmark: dict, face_location):
top, right, bottom, left = face_location

left_eye = face_landmark['left_eye']
left_eye_point = left_eye[len(left_eye) // 2]
left_eye_v = np.array(left_eye_point)

right_eye = face_landmark['right_eye']
right_eye_point = right_eye[len(right_eye) // 2]
right_eye_v = np.array(right_eye_point)

eye_v = (right_eye_v + left_eye_v)//2

# split helmet and resize
width = self._helmet_img.width
height = self._helmet_img.height
width_ratio = 1.2
new_height = int(np.linalg.norm(top - eye_v[1]))
new_width = int(np.linalg.norm(right-left))

# left
helmet_left_img = self._helmet_img.crop((0, 0, width // 2, height))
#helmet_left_width = self.get_distance_from_point_to_line(chin_left_point, nose_point, chin_bottom_point)
#helmet_left_width = int(helmet_left_width * width_ratio)
#helmet_left_img = helmet_left_img.resize((helmet_left_width, new_height))

# right
#helmet_right_img = self._helmet_img.crop((width // 2, 0, width, height))
#helmet_right_width = self.get_distance_from_point_to_line(chin_right_point, nose_point, chin_bottom_point)
#helmet_right_width = int(helmet_right_width * width_ratio)
#helmet_right_img = helmet_right_img.resize((helmet_right_width, new_height))


# merge helmet
size = (new_width, new_height*2)
new_helmet = self._helmet_img.resize(size)
#self._helmet_img = Image.new('RGBA', size)
#helmet_img.paste(helmet_img, (0, 0), helmet_img)

        # ad-hoc attachment starts here
#b = os.path.basename(self.face_path)
#pname = os.path.splitext(b)
#temp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'with_helmet\\') + pname[0] + '-with-helmet' + pname[1]
#self._face_img.paste(new_helmet, (0,0))
#self._face_img.save(temp_path)
        # ad-hoc attachment ends here

# rotate helmet
        # np.arctan2 returns radians, but PIL's Image.rotate expects degrees
        angle = np.degrees(np.arctan2(top - (left_eye_point[1] + right_eye_point[1]) / 2,
                                      (left + right) / 2 - (left_eye_point[0] + right_eye_point[0]) / 2))
        rotated_helmet_img = self._helmet_img.rotate(angle, expand=True)

# calculate helmet location
center_x = ((left+right)/2 + (left_eye_point[0] + right_eye_point[0]) / 2) // 2
center_y = (top + (left_eye_point[1] + right_eye_point[1]) / 2) // 2

offset = self._helmet_img.width // 2 - helmet_left_img.width
radian = angle * np.pi / 180
box_x = int(center_x + int(offset * np.cos(radian)) - rotated_helmet_img.width // 2)
box_y = int(center_y + int(offset * np.sin(radian)) - rotated_helmet_img.height // 2)

# add helmet
#self._face_img.paste(helmet_img, (box_x, box_y), helmet_img)
#self._face_img.paste(new_helmet, (left, top-self._helmet_img.height), new_helmet)
self._face_img.paste(new_helmet, (left, top-self._helmet_img.height//3), new_helmet)

def _save(self):
        base = os.path.basename(self.face_path)
        pure_name = os.path.splitext(base)
        new_face_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'with_helmet4',
                                     pure_name[0] + '-with-helmet6-1' + pure_name[1])
self._face_img.save(new_face_path)
print(f'Save to {new_face_path}')

@staticmethod
def get_distance_from_point_to_line(point, line_point1, line_point2):
distance = np.abs((line_point2[1] - line_point1[1]) * point[0] +
(line_point1[0] - line_point2[0]) * point[1] +
(line_point2[0] - line_point1[0]) * line_point1[1] +
(line_point1[1] - line_point2[1]) * line_point1[0]) / \
np.sqrt((line_point2[1] - line_point1[1]) * (line_point2[1] - line_point1[1]) +
(line_point1[0] - line_point2[0]) * (line_point1[0] - line_point2[0]))
return int(distance)


if __name__ == '__main__':
    #cli()
    create_helmet(sys.argv[1])  # expects the input picture path as the first command-line argument
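
A minimal usage sketch for Data_Generator/helmet.py (assuming face_recognition, Pillow and numpy are installed; the input picture path below is hypothetical):

```python
# Hypothetical input path; the helmet images come from Data_Generator/images/.
from helmet import create_helmet, FaceMasker

# Simplest entry point: pastes images/helmet6.png onto every detected face and
# writes the result into the with_helmet4/ folder next to helmet.py.
create_helmet('without_helmet/person1.jpg')

# Lower-level call with an explicit helmet image and the slower CNN detector.
FaceMasker('without_helmet/person1.jpg', 'images/helmet3.jpg', show=True, model='cnn').helmet()
```

The cli() entry point covers the same functionality from the command line, e.g. `python helmet.py person1.jpg --three --show`.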
Binary file added Data_Generator/images/blue-mask.png
Binary file added Data_Generator/images/helmet1.jpg
Binary file added Data_Generator/images/helmet2.jpg
Binary file added Data_Generator/images/helmet3.jpg
Binary file added Data_Generator/images/helmet4.jpg
Binary file added Data_Generator/images/helmet5.jpg
Binary file added Data_Generator/images/helmet6.png
Binary file added Data_Generator/images/white.PNG
18 changes: 18 additions & 0 deletions Data_Generator/loop_through_folder.py
@@ -0,0 +1,18 @@
import cv2
import os
from helmet import create_helmet


folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'without_helmet')
#dist_path = "/home/preeth/Downloads"

#c = 0
images = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
for i in range(len(images)):
#print("the path of the image is", images[i])
#image = cv2.imread(images[i])
#c = c + 1
create_helmet(images[i])
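
One optional hardening for loop_through_folder.py, shown only as a sketch and not taken from this PR: restrict the listing to image extensions so stray files in without_helmet/ are not passed to create_helmet.

```python
# Sketch only: the extension list is an assumption.
IMAGE_EXTS = ('.jpg', '.jpeg', '.png')
images = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
          if os.path.isfile(os.path.join(folder_path, f)) and f.lower().endswith(IMAGE_EXTS)]
```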



Binary file added models/converted_model.tflite
Binary file not shown.
5 changes: 5 additions & 0 deletions models/h5totf.py
@@ -0,0 +1,5 @@
from tensorflow import keras
model = keras.models.load_model('./mask_detector.model', compile=False)

export_path = './tf'
model.save(export_path, save_format="tf")
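
The PR also adds models/converted_model.tflite but no conversion script; the sketch below shows one plausible way the SavedModel written by h5totf.py could have been converted (the converter settings here are an assumption, not taken from this PR).

```python
import tensorflow as tf

# Assumes h5totf.py has already been run, so a SavedModel exists under ./tf.
converter = tf.lite.TFLiteConverter.from_saved_model('./tf')
tflite_model = converter.convert()

with open('./converted_model.tflite', 'wb') as f:
    f.write(tflite_model)
```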
Binary file added models/mask_detector.model
Binary file not shown.
110 changes: 110 additions & 0 deletions models/tensorflow_train.py
@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
"""Untitled2.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1h3uWbBpq1QtrtaVbvHVGiwno8HaSeig-
"""

from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
from google.colab import drive
drive.mount('/content/gdrive')

INIT_LR = 1e-4
EPOCHS = 20
BS = 32

DIRECTORY = r"/content/gdrive/My Drive/experiements/data"
CATEGORIES = ["with_helmet", "without_helmet"]

data = []
labels = []

for category in CATEGORIES:
path = os.path.join(DIRECTORY, category)
for img in os.listdir(path):
img_path = os.path.join(path, img)
image = load_img(img_path, target_size=(224, 224))
image = img_to_array(image)
image = preprocess_input(image)

data.append(image)
labels.append(category)

lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)

data = np.array(data, dtype="float32")
labels = np.array(labels)

(trainX, testX, trainY, testY) = train_test_split(data, labels,
test_size=0.20, stratify=labels, random_state=42)

aug = ImageDataGenerator(
rotation_range=20,
zoom_range=0.15,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.15,
horizontal_flip=True,
fill_mode="nearest")

baseModel = MobileNetV2(weights="imagenet", include_top=False,
input_tensor=Input(shape=(224, 224, 3)))

headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

model = Model(inputs=baseModel.input, outputs=headModel)

for layer in baseModel.layers:
layer.trainable = False

print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
metrics=["accuracy"])

print("[INFO] training head...")
H = model.fit(
aug.flow(trainX, trainY, batch_size=BS),
steps_per_epoch=len(trainX) // BS,
validation_data=(testX, testY),
validation_steps=len(testX) // BS,
epochs=EPOCHS)

print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)

predIdxs = np.argmax(predIdxs, axis=1)

print(classification_report(testY.argmax(axis=1), predIdxs,
target_names=lb.classes_))

print("[INFO] saving mask detector model...")
model.save("mask_detector", save_format="h5")