-
Notifications
You must be signed in to change notification settings - Fork 0
/
feature_extract.py
130 lines (100 loc) · 4.62 KB
/
feature_extract.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
from imutils import face_utils
import numpy as np
import pandas as pd
import imutils
import dlib
import cv2
from math import sqrt
import csv
import argparse
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def dist(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return sqrt(dx * dx + dy * dy)
def abs_diff(p1, p2):
    """Return the component-wise absolute difference of two 2-D points."""
    return tuple(abs(a - b) for a, b in zip(p1, p2))
def mid_pt(p1, p2):
    """Return the integer midpoint of two 2-D points (floor division)."""
    return tuple((a + b) // 2 for a, b in zip(p1, p2))
def feature_extraction(path):
    """Extract geometric facial-ratio features from the image at *path*.

    Detects frontal faces with dlib, fits 68 facial landmarks, and derives
    ratio features used downstream for BMI regression.

    Parameters
    ----------
    path : str
        Path to the input image file.

    Returns
    -------
    dict
        Feature-name -> value mapping with keys:
        "cjwr", "whr", "par", "es", "lffh", "fwlfh", "meh", "col".

    Raises
    ------
    ValueError
        If the image cannot be read or no face is detected.
    """
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    image = cv2.imread(path)
    if image is None:
        # cv2.imread returns None (no exception) on a missing/unreadable file.
        raise ValueError("could not read image: %s" % path)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    faces = detector(gray, 1)
    if len(faces) == 0:
        # The original code only assigned `shape` inside the loop over
        # detected faces, so a no-face image crashed with NameError below.
        raise ValueError("no face detected in image: %s" % path)

    shape = None
    for face in faces:
        # NOTE: with multiple faces, the landmarks of the LAST detected
        # face are used (unchanged from the original behavior).
        shape = predictor(gray, face)
        shape = face_utils.shape_to_np(shape)
        # Debug overlay: draw the bounding box and landmark dots.
        (x, y, w, h) = face_utils.rect_to_bb(face)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        for (px, py) in shape:
            cv2.circle(image, (px, py), 1, (0, 0, 255), -1)
    # Removed the stray cv2.waitKey(0): its paired imshow was commented out,
    # so it only stalled the pipeline with no window to service.

    # Map from 1-based anthropometric point labels to 0-based indices into
    # the 68-point dlib landmark array.
    pts = {"1": 0, "15": 16, "4": 3, "12": 13, "8": 8, "67": 66, "28": 36, "33": 45, "30": 39, "35": 42, "22": 17,
           "29": 37, "25": 21, "19": 22, "34": 43, "16": 26, "26": 20, "27": 18, "20": 23, "21": 25}

    # Cheek-to-jaw width ratio.
    chk_w = dist(shape[pts["1"]][0], shape[pts["1"]][1], shape[pts["15"]][0], shape[pts["15"]][1])
    jaw_w = dist(shape[pts["4"]][0], shape[pts["4"]][1], shape[pts["12"]][0], shape[pts["12"]][1])
    cjwr = chk_w / jaw_w

    # Grayscale intensity at landmark 30 (nose tip), cast from numpy uint8
    # to a plain Python int so the feature frame holds native scalars.
    color = int(gray[shape[30][1], shape[30][0]])

    N1 = mid_pt(shape[pts["29"]], shape[pts["34"]])
    N3 = shape[20]
    N4 = shape[23]
    # N2 = intersection of line (landmark 36 -> N3) with line (45 -> N4).
    # NOTE(review): perfectly vertical lines would raise ZeroDivisionError
    # here, exactly as in the original code.
    m1 = (shape[pts["28"]][1] - N3[1]) / (shape[pts["28"]][0] - N3[0])
    m2 = (shape[pts["33"]][1] - N4[1]) / (shape[pts["33"]][0] - N4[0])
    x_temp = int((N3[1] - N4[1] - m1 * N3[0] + m2 * N4[0]) / (m2 - m1))
    y_temp = int(N3[1] + m1 * (x_temp - N3[0]))
    N2 = (x_temp, y_temp)

    # Width-to-upper-facial-height ratio.
    ufc_h = dist(shape[pts["67"]][0], shape[pts["67"]][1], N1[0], N1[1])
    whr = chk_w / ufc_h

    # Perimeter-to-area ratio of the lower-face polygon (built once; the
    # original constructed the identical array twice).
    poly = np.array([shape[pts["1"]], shape[pts["4"]], shape[pts["8"]],
                     shape[pts["12"]], shape[pts["15"]], shape[pts["1"]]])
    perimeter = cv2.arcLength(poly, True)
    area = cv2.contourArea(poly)
    par = perimeter / area

    # Eye-size proxy: half the difference between the outer and inner
    # eye-corner spans.
    es = 0.5 * (dist(shape[pts["28"]][0], shape[pts["28"]][1], shape[pts["33"]][0], shape[pts["33"]][1]) -
                dist(shape[pts["30"]][0], shape[pts["30"]][1], shape[pts["35"]][0], shape[pts["35"]][1]))

    # Lower-face height and its ratios.
    lfh = shape[pts["8"]][1] - shape[pts["1"]][1]
    lffh = lfh / dist(N2[0], N2[1], shape[pts["8"]][0], shape[pts["8"]][1])
    fwlfh = chk_w / lfh

    # Mean eyebrow height: average of six brow-to-eye landmark distances.
    brow_dists = (
        dist(shape[pts["22"]][0], shape[pts["22"]][1], shape[pts["28"]][0], shape[pts["28"]][1]),
        dist(shape[pts["29"]][0], shape[pts["29"]][1], N3[0], N3[1]),
        dist(shape[pts["25"]][0], shape[pts["25"]][1], shape[pts["30"]][0], shape[pts["30"]][1]),
        dist(shape[pts["19"]][0], shape[pts["19"]][1], shape[pts["35"]][0], shape[pts["35"]][1]),
        dist(shape[pts["34"]][0], shape[pts["34"]][1], N4[0], N4[1]),
        dist(shape[pts["16"]][0], shape[pts["16"]][1], shape[pts["33"]][0], shape[pts["33"]][1]),
    )
    meh = sum(brow_dists) / 6

    features = {"cjwr": cjwr, "whr": whr, "par": par, "es": es, "lffh": lffh,
                "fwlfh": fwlfh, "meh": meh, "col": color}
    return features
def merge_two_dicts(x, y):
    """Combine two dicts into a new dict; y's values win on key clashes."""
    return {**x, **y}
def main():
    """CLI entry point: extract features from one image and predict its BMI.

    Trains an SVR on features/targets loaded from 'final_results.csv', then
    predicts the BMI for the image supplied via --image and prints it.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="path to input image")
    args = vars(ap.parse_args())

    features = feature_extraction(args["image"])
    new_df = pd.DataFrame([features])
    print(new_df.head())

    df = pd.read_csv("final_results.csv")
    y = df["bmi"]
    X = df.drop(["bmi", "Height", "Weight", "Name", "path"], axis=1)

    # Only the training split is used here; the held-out split is kept so
    # the model trains on exactly the same rows (same random_state) as before.
    X_train, _X_test, y_train, _y_test = train_test_split(X, y, test_size=0.33, random_state=42)

    clf = SVR(C=1.0, epsilon=0.2)
    clf.fit(X_train, y_train)

    # Align the new sample's columns with the training columns: sklearn
    # matches features by POSITION, so a differing dict-insertion order in
    # `features` would silently scramble the inputs to predict().
    result = clf.predict(new_df[X.columns])
    print(result)


if __name__ == "__main__":
    main()