diff --git a/.idea/Filteristic.iml b/.idea/Filteristic.iml
index d28d420..8aeb520 100644
--- a/.idea/Filteristic.iml
+++ b/.idea/Filteristic.iml
@@ -2,11 +2,10 @@
-
-
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
index b534b89..8d93904 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/background_image/check_directory.py b/background_image/check_directory.py
index 3691be4..1e2dcd0 100644
--- a/background_image/check_directory.py
+++ b/background_image/check_directory.py
@@ -27,20 +27,30 @@ def saveimage(path):
os.remove(path)
return
def add_path(image_path,origin):
+
+ path_image = file_exist(origin)
+
+    if ':' in image_path and "\\" in image_path and 'http' not in image_path:  # looks like a local Windows path
+        img = cv2.imread(image_path)
+        cv2.imwrite(path_image, img)
+
+    else:  # otherwise treat the input as a URL and download it
+        r = requests.get(image_path)
+        with open(path_image, 'wb') as f:
+            f.write(r.content)
+
+ return path_image
+
+def file_exist(origin):
path_name = input('add path name : ')
- r= requests.get(image_path)
path_image = f'{origin}{path_name}.png'
file_exists = exists(path_image)
- while file_exists :
+ while file_exists:
path_name2 = input('this name is exist try another one or enter q : ')
- if path_name2 =='q':
- path_image=''
+ if path_name2 == 'q':
+ path_image = ''
break
path_image = f'{origin}{path_name2}.png'
file_exists = exists(path_image)
-
- with open(path_image,'wb') as f :
- f.write(r.content)
-
return path_image
\ No newline at end of file
diff --git a/background_image/filter_background_image.py b/background_image/filter_background_image.py
index 71c5b57..3d2fa87 100644
--- a/background_image/filter_background_image.py
+++ b/background_image/filter_background_image.py
@@ -5,10 +5,14 @@ def filter_background_image():
background_filter =''
color = ''
type_of_background = input('enter type of background : [gray,image,blur,color] : ')
- path_input_image = add_path(input('path image : '),'Input_image/')
+
+    path_input_image = add_path(input('image path : '), 'Input_image/')
+
if type_filter ==0 and type_of_background == 'image' :
background_filter = add_path(input('path image : '), 'input_background/')
backgroundImage(type_of_background, path_input_image,color, background_filter)
+
+
if type_of_background=='color':
add_color = input('input color rgb : 0,0,255 : ')
color = tuple(map(int, add_color.split(',')))
@@ -17,4 +21,5 @@ def filter_background_image():
backgroundImage(type_of_background,path_input_image,color,'../assest/background/back1.jpg')
if type_filter == 2 :
backgroundImage(type_of_background,path_input_image,background_filter,color,'../assest/background/back2.jpg')
+
filter_background_image()
\ No newline at end of file
diff --git a/background_live/background_live.py b/background_live/background_live.py
index 3dbee72..fd307ec 100644
--- a/background_live/background_live.py
+++ b/background_live/background_live.py
@@ -2,7 +2,6 @@
import mediapipe as mp
import numpy as np
from request_image import add_path
-import os
def background(path,blur =1,img=2):
mp_selfie_segmentation = mp.solutions.selfie_segmentation
back1=1
@@ -15,10 +14,10 @@ def background(path,blur =1,img=2):
# begin with selfie segmentation model
- img_name = ""
+
while cam.isOpened():
scene = cv2.imread(path) # read the scene image
- scene = cv2.blur(scene, (blur, blur))
+ scene = cv2.blur(scene, (1, 1))
scene = cv2.resize(scene, (fsize[1], fsize[0])) # resize scene to the size of frame
with mp_selfie_segmentation.SelfieSegmentation(model_selection=1) as selfie_seg:
bg_image = scene
@@ -52,29 +51,11 @@ def background(path,blur =1,img=2):
if back1 ==img+1:
back1=1
path = f'../assest/background/back{back1}.png'
- if key == ord('a'):
- path = add_path(input('path image : '))
- # wait until any key is pressed
+
if key == ord('q'):
cam.release()
cv2.destroyAllWindows() # wait until any key is pressed
- elif key == ord("c"):
- img_name = "../saved/opencv_frame.png"
- cv2.imwrite(img_name, output_image)
- # print("{} written!".format(img_name))
- break
- frames = cv2.imread(img_name)
- cv2.imshow("Background Change with MP", frames)
- key = cv2.waitKey(0)
- os.remove("../saved/opencv_frame.png")
- if key == ord("s"):
- user_name = input("enter name")
- imgdir = f"../saved/{user_name}.png"
- cv2.imwrite(imgdir, frames)
- background('../assest/background/back1.png',blur,img)
- if key == ord("e"):
- background('../assest/background/back1.png',blur,img)
cam.release()
cv2.destroyAllWindows()
diff --git a/background_live/filter_background_live.py b/background_live/filter_background_live.py
index b002c31..ca161f6 100644
--- a/background_live/filter_background_live.py
+++ b/background_live/filter_background_live.py
@@ -1,13 +1,10 @@
from background_live import background
from request_image import add_path,check_image
-from os.path import exists
-type_filter = int(input('type_filter : '))
+type_filter = input('add new background [yes,no] ? ')
type_blur=int(input('number for blur start from 1 : '))
-if type_filter == 0:
- path,img = add_path(input('path image : '))
- background('../assest/background/back1.png', type_blur,img)
-if type_filter ==1:
- img =check_image()
- print(img)
- background('../assest/background/back1.png',type_blur,img)
+if type_filter == 'yes':
+ add_path(input('path image : '))
+
+img = check_image()
+background('../assest/background/back1.png', type_blur, img)
diff --git a/background_live/request_image.py b/background_live/request_image.py
index 1e548ca..f00ceaf 100644
--- a/background_live/request_image.py
+++ b/background_live/request_image.py
@@ -1,19 +1,29 @@
import requests
from os.path import exists
+import os
+import cv2
def add_path(image_path):
- r= requests.get(image_path)
- img =1
+    path_image = file_exist()
+    if ':' in image_path and "\\" in image_path and 'http' not in image_path:
+        # looks like a local Windows path: copy the image into the backgrounds folder
+        img = cv2.imread(image_path)
+        cv2.imwrite(path_image, img)
+    else:
+        # otherwise treat the input as a URL and download it
+        r = requests.get(image_path)
+        with open(path_image, 'wb') as f:
+            f.write(r.content)
+
+    return path_image
+def file_exist():
+ img = 1
path_image = f'../assest/background/back{img}.png'
file_exists = exists(path_image)
while file_exists :
img+=1
path_image = f'../assest/background/back{img}.png'
file_exists = exists(path_image)
-
- with open(path_image,'wb') as f :
- f.write(r.content)
-
- return path_image,img
+ return path_image
def check_image():
img = 1
diff --git a/filters_image/__pycache__/change_filter.cpython-39.pyc b/filters_image/__pycache__/change_filter.cpython-39.pyc
new file mode 100644
index 0000000..1b3db78
Binary files /dev/null and b/filters_image/__pycache__/change_filter.cpython-39.pyc differ
diff --git a/filters_image/change_filter.py b/filters_image/change_filter.py
new file mode 100644
index 0000000..c94fb31
--- /dev/null
+++ b/filters_image/change_filter.py
@@ -0,0 +1,69 @@
+## configuration data for the face filters (imported as change_filter by image_filtering_face.py)
+
+change_filter =[{'filter':['../assest/hair9.png','../assest/ghoul2.png','../assest/eye1.png','../assest/eye2.png'],
+ 'center':[27,66,40,46],
+ 'width':[1.5,1,0.2,0.2],
+ 'height':[0.5,1,0.4,0.4],
+ 'up':[100,20,5,5],
+ 'left':[0,0,0,0],
+ 'counte':1},
+ {'filter': ["../assest/birthday-hat.png"],
+ 'center': [27],
+ 'width': [1.3],
+ 'height': [1],
+ 'up': [120],
+ 'left': [0],
+ 'counte':2},
+ {'filter': ["../assest/gold-crown.png"],
+ 'center': [27],
+ 'width': [1.3],
+ 'height': [1],
+ 'up': [120],
+ 'left': [0],
+ 'counte':3},
+ {'filter': ["../assest/queens-crown.png"],
+ 'center': [27],
+ 'width': [1.3],
+ 'height': [1],
+ 'up': [120],
+ 'left': [0],
+ 'counte':4},
+ {'filter': ["../assest/flower-crown.png"],
+ 'center': [27],
+ 'width': [1.3],
+ 'height': [1],
+ 'up': [80],
+ 'left': [0],
+ 'counte': 5},
+ {'filter': ["../assest/funny-sunglass.png"],
+ 'center': [27],
+ 'width': [1.05],
+ 'height': [0.33],
+ 'up': [0],
+ 'left': [0],
+ 'counte': 6},
+ {'filter': ['../assest/moustache2.png'],
+ 'center': [51],
+ 'width': [1],
+ 'height': [0.5],
+ 'up': [1],
+ 'left': [-3],
+ 'counte': 7},
+ {'filter': ["../assest/beard.png"],
+ 'center': [8],
+ 'width': [1.1],
+ 'height': [1.5],
+ 'up': [0],
+ 'left': [0],
+ 'counte': 8},
+ {'filter': ["../assest/tongue.png"],
+ 'center': [57],
+ 'width': [0.6],
+ 'height': [1.2],
+ 'up': [-25],
+ 'left': [0],
+ 'counte': 0},
+
+
+ ]
+
diff --git a/filters_image/image_filtering_face.py b/filters_image/image_filtering_face.py
index 99ac195..9308c74 100644
--- a/filters_image/image_filtering_face.py
+++ b/filters_image/image_filtering_face.py
@@ -1,89 +1,105 @@
-import cv2
-import numpy as np
-import dlib
-from math import hypot
-from filters_live.change_filter import change_filter
-import os
-
-def image_filtering_face(path_filter,path_image,center,width,height,up,left,counte=0):
- filter_image = []
- for i in path_filter:
- filter_image.append(cv2.imread(i))
- image = cv2.imread(path_image)
- rows, cols, _ = image.shape
- filter1 = np.zeros((rows, cols), np.uint8)
- filter1.fill(0)
- gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
- detector = dlib.get_frontal_face_detector()
- faces = detector(image)
- if faces:
- try:
- for i in range(len(path_filter)):
- filter(image,gray_image,faces,filter_image[i],center[i],width[i],height[i],up[i],left[i])
- except:
- image = cv2.imread(path_image)
- cv2.imshow("Frame", image)
- else:
- image = cv2.imread(path_image)
- cv2.imshow("Frame", image)
- key = cv2.waitKey(0)
- if key == ord('n'):
- change_image(counte,path_image)
- elif key == ord('q'):
- cv2.destroyAllWindows()
- if key == ord("s"):
- user_name = input("enter name")
- imgdir = f"../saved/{user_name}.png"
- cv2.imwrite(imgdir, image)
- change_image(counte,path_image)
-
-
-def filter(image,gray_frame,faces,filter_image1,center,width,height,up=0,left=0):
- predictor_path = r"../assest/shape_predictor_68_face_landmarks.dat"
- predictor = dlib.shape_predictor(predictor_path)
-
- for face in faces:
- landmarks = predictor(gray_frame, face)
-
- center_filter = (landmarks.part(center).x-left, landmarks.part(center).y-up)
- left_filter = (landmarks.part(4).x, landmarks.part(4).y)
- right_filter = (landmarks.part(14).x, landmarks.part(14).y)
-
- filter_width = int(hypot(left_filter[0] - right_filter[0],
- left_filter[1] - right_filter[1]) * width)
- filter_height = int(filter_width * height)
-
- # New filter position
- top_left = (int(center_filter[0] - filter_width / 2),
- int(center_filter[1] - filter_height / 2))
- bottom_right = (int(center_filter[0] + filter_width / 2),
- int(center_filter[1] + filter_height / 2))
-
- # Adding the new filter
- filtery = cv2.resize(filter_image1, (filter_width, filter_height))
- filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
- _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)
-
- filter_area = image[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width]
- filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
- final_filter = cv2.add(filter_area_no_filter, filtery)
-
- image[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width,:] = final_filter
-
- cv2.imshow("Frame", image)
-
-def change_image(i,path_image):
- image_filtering_face(
- change_filter[i]['filter'],
- path_image,
- change_filter[i]['center'],
- change_filter[i]['width'],
- change_filter[i]['height'],
- change_filter[i]['up'],
- change_filter[i]['left'],
- change_filter[i]['counte']
- )
-if __name__ == "__main__":
- image_filtering_face(["../assest/tongue.png"],"../assest/face.jpg",[57],[0.6],[1.2],[-25],[0])
\ No newline at end of file
+import cv2
+import numpy as np
+import dlib
+from math import hypot
+from change_filter import change_filter
+import os
+
+# Loading Face detector
+
+def image_filtering_face(path_filter,center,width,height,up,left,counte=0):
+ # path = r"../assest/moustache2.png"
+
+ filter_image = []
+ for i in path_filter:
+ filter_image.append(cv2.imread(i))
+
+ image = cv2.imread("saved/test.jpg")
+ rows, cols, _ = image.shape
+ filter1 = np.zeros((rows, cols), np.uint8)
+ filter1.fill(0)
+ gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ detector = dlib.get_frontal_face_detector()
+ faces = detector(image)
+ if faces:
+ try:
+ for i in range(len(path_filter)):
+ filter(image,gray_image,faces,filter_image[i],center[i],width[i],height[i],up[i],left[i])
+        except Exception:  # fall back to the unfiltered image if a filter fails to place
+            image = cv2.imread("saved/test.jpg")
+ cv2.imshow("Frame", image)
+
+ key = cv2.waitKey(0)
+ if key == ord('n'):
+ change_image(counte)
+ elif key == ord('q'):
+ cv2.destroyAllWindows()
+
+ elif key == ord("c"):
+ img_name = "../saved/opencv_frame.png"
+ cv2.imwrite(img_name, image)
+ print("{} written!".format(img_name))
+
+ image = cv2.imread(img_name)
+ cv2.imshow("Frame", image)
+ key = cv2.waitKey(0)
+ os.remove("../saved/opencv_frame.png")
+ if key == ord("s"):
+            user_name = input("enter name : ")
+ imgdir = f"../saved/{user_name}.png"
+ cv2.imwrite(imgdir, image)
+            image_filtering_face(["../assest/tongue.png"],[57],[0.6],[1.2],[-25],[0])
+ if key == ord("e"):
+            image_filtering_face(["../assest/tongue.png"],[57],[0.6],[1.2],[-25],[0])
+
+
+
+
+def filter(image,gray_frame,faces,filter_image1,center,width,height,up=0,left=0):
+ predictor_path = r"../assest/shape_predictor_68_face_landmarks.dat"
+ predictor = dlib.shape_predictor(predictor_path)
+
+ for face in faces:
+ landmarks = predictor(gray_frame, face)
+
+ center_filter = (landmarks.part(center).x-left, landmarks.part(center).y-up)
+ left_filter = (landmarks.part(4).x, landmarks.part(4).y)
+ right_filter = (landmarks.part(14).x, landmarks.part(14).y)
+
+ filter_width = int(hypot(left_filter[0] - right_filter[0],
+ left_filter[1] - right_filter[1]) * width)
+ filter_height = int(filter_width * height)
+
+ # New filter position
+ top_left = (int(center_filter[0] - filter_width / 2),
+ int(center_filter[1] - filter_height / 2))
+ bottom_right = (int(center_filter[0] + filter_width / 2),
+ int(center_filter[1] + filter_height / 2))
+
+ # Adding the new filter
+ filtery = cv2.resize(filter_image1, (filter_width, filter_height))
+ filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
+ _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)
+
+ filter_area = image[top_left[1]: top_left[1] + filter_height,
+ top_left[0]: top_left[0] + filter_width]
+ filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
+ final_filter = cv2.add(filter_area_no_filter, filtery)
+
+ image[top_left[1]: top_left[1] + filter_height,
+ top_left[0]: top_left[0] + filter_width,:] = final_filter
+
+ cv2.imshow("Frame", image)
+
+def change_image(i):
+ image_filtering_face(
+ change_filter[i]['filter'],
+ change_filter[i]['center'],
+ change_filter[i]['width'],
+ change_filter[i]['height'],
+ change_filter[i]['up'],
+ change_filter[i]['left'],
+ change_filter[i]['counte']
+ )
+if __name__ == "__main__":
+ image_filtering_face(["../assest/tongue.png"],[57],[0.6],[1.2],[-25],[0])
\ No newline at end of file
diff --git a/filters_image/saved/test.jpg b/filters_image/saved/test.jpg
new file mode 100644
index 0000000..1202ab7
Binary files /dev/null and b/filters_image/saved/test.jpg differ