diff --git a/.idea/Filteristic.iml b/.idea/Filteristic.iml
index d28d420..8aeb520 100644
--- a/.idea/Filteristic.iml
+++ b/.idea/Filteristic.iml
@@ -2,11 +2,10 @@
-
-
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
index b534b89..8d93904 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/__pycache__/main.cpython-38.pyc b/__pycache__/main.cpython-38.pyc
deleted file mode 100644
index cae4fd6..0000000
Binary files a/__pycache__/main.cpython-38.pyc and /dev/null differ
diff --git a/assest/background/back1.png b/assest/background/back1.png
new file mode 100644
index 0000000..d9f9437
Binary files /dev/null and b/assest/background/back1.png differ
diff --git a/assest/background/back2.png b/assest/background/back2.png
new file mode 100644
index 0000000..5c05343
Binary files /dev/null and b/assest/background/back2.png differ
diff --git a/assest/background/back3.png b/assest/background/back3.png
new file mode 100644
index 0000000..bc5b5f2
Binary files /dev/null and b/assest/background/back3.png differ
diff --git a/assest/background/back4.png b/assest/background/back4.png
new file mode 100644
index 0000000..9454d5e
Binary files /dev/null and b/assest/background/back4.png differ
diff --git a/assest/background/back5.png b/assest/background/back5.png
new file mode 100644
index 0000000..ee5614d
Binary files /dev/null and b/assest/background/back5.png differ
diff --git a/assest/beard.png b/assest/beard.png
new file mode 100644
index 0000000..ece3102
Binary files /dev/null and b/assest/beard.png differ
diff --git a/assest/birthday-hat.png b/assest/birthday-hat.png
new file mode 100644
index 0000000..3fc8422
Binary files /dev/null and b/assest/birthday-hat.png differ
diff --git a/assest/eye1.png b/assest/eye1.png
new file mode 100644
index 0000000..c6fb1a0
Binary files /dev/null and b/assest/eye1.png differ
diff --git a/assest/eye2.png b/assest/eye2.png
new file mode 100644
index 0000000..e53e72f
Binary files /dev/null and b/assest/eye2.png differ
diff --git a/assest/face.jpg b/assest/face.jpg
new file mode 100644
index 0000000..cb38dea
Binary files /dev/null and b/assest/face.jpg differ
diff --git a/assest/flower-crown.png b/assest/flower-crown.png
new file mode 100644
index 0000000..d2b454b
Binary files /dev/null and b/assest/flower-crown.png differ
diff --git a/assest/funny-sunglass.png b/assest/funny-sunglass.png
new file mode 100644
index 0000000..6d673c7
Binary files /dev/null and b/assest/funny-sunglass.png differ
diff --git a/assest/ghoul2.png b/assest/ghoul2.png
new file mode 100644
index 0000000..8b4ca39
Binary files /dev/null and b/assest/ghoul2.png differ
diff --git a/assest/gold-crown.png b/assest/gold-crown.png
new file mode 100644
index 0000000..9838f0e
Binary files /dev/null and b/assest/gold-crown.png differ
diff --git a/assest/hair9.png b/assest/hair9.png
new file mode 100644
index 0000000..b835895
Binary files /dev/null and b/assest/hair9.png differ
diff --git a/assest/moustache2.png b/assest/moustache2.png
new file mode 100644
index 0000000..c26900e
Binary files /dev/null and b/assest/moustache2.png differ
diff --git a/assest/queens-crown.png b/assest/queens-crown.png
new file mode 100644
index 0000000..dfcf506
Binary files /dev/null and b/assest/queens-crown.png differ
diff --git a/sunglasses.png b/assest/sunglasses.png
similarity index 100%
rename from sunglasses.png
rename to assest/sunglasses.png
diff --git a/assest/tongue.png b/assest/tongue.png
new file mode 100644
index 0000000..93636b8
Binary files /dev/null and b/assest/tongue.png differ
diff --git a/background_image/backgorund_image.py b/background_image/backgorund_image.py
new file mode 100644
index 0000000..b6b2705
--- /dev/null
+++ b/background_image/backgorund_image.py
@@ -0,0 +1,23 @@
+import pixellib
+from check_directory import saveimage,check_image
+from pixellib.tune_bg import alter_bg
+def backgroundImage(type,input_image,color,back_ground=''):
+ path_name =check_image()
+ if path_name:
+ change_bg = alter_bg(model_type = "pb")
+ change_bg.load_pascalvoc_model("xception_pascalvoc.pb")
+
+ if type == 'image':
+ change_bg.change_bg_img(f_image_path = input_image,b_image_path = back_ground, output_image_name=path_name)
+ if type == 'gray':
+ change_bg.gray_bg(input_image, output_image_name=path_name)
+ if type =='color':
+ change_bg.color_bg(input_image, colors=color, output_image_name=path_name)
+ if type == 'blur':
+ change_bg.blur_bg(input_image, low=True, output_image_name=path_name)
+ saveimage(path_name)
+
+
+
+
+# backgroundImage('gray','Output_image/test2.png','Input_image/lena.jpg')
diff --git a/background_image/check_directory.py b/background_image/check_directory.py
new file mode 100644
index 0000000..3691be4
--- /dev/null
+++ b/background_image/check_directory.py
@@ -0,0 +1,46 @@
+from os.path import exists
+import os
+import cv2
+import requests
+def check_image():
+ path = input('enter the name of path output : ')
+ path_image = f'Output_image/{path}.png'
+ file_exists = exists(path_image)
+ while file_exists:
+ path_name2 = input('this name is exist try another one or enter q : ')
+ if path_name2 == 'q':
+ path_image = False
+ break
+
+ path_image = f'Output_image/{path_name2}.png'
+ file_exists = exists(path_image)
+ return path_image
+
+def saveimage(path):
+ print("close the window for image")
+ image = cv2.imread(path)
+ cv2.imshow('image window', image)
+ cv2.waitKey(0)
+ cv2.destroyAllWindows()
+ save = input('do you want to save image ?')
+ if not save == 'yes':
+ os.remove(path)
+ return
+def add_path(image_path,origin):
+ path_name = input('add path name : ')
+ r= requests.get(image_path)
+ path_image = f'{origin}{path_name}.png'
+ file_exists = exists(path_image)
+ while file_exists :
+ path_name2 = input('this name is exist try another one or enter q : ')
+ if path_name2 =='q':
+            return ''  # user aborted; skip writing the downloaded image
+ break
+
+ path_image = f'{origin}{path_name2}.png'
+ file_exists = exists(path_image)
+
+ with open(path_image,'wb') as f :
+ f.write(r.content)
+
+ return path_image
\ No newline at end of file
diff --git a/background_image/filter_background_image.py b/background_image/filter_background_image.py
new file mode 100644
index 0000000..71c5b57
--- /dev/null
+++ b/background_image/filter_background_image.py
@@ -0,0 +1,20 @@
+from backgorund_image import backgroundImage
+from check_directory import add_path
+def filter_background_image():
+ type_filter = int(input('type_filter : '))
+ background_filter =''
+ color = ''
+ type_of_background = input('enter type of background : [gray,image,blur,color] : ')
+ path_input_image = add_path(input('path image : '),'Input_image/')
+ if type_filter ==0 and type_of_background == 'image' :
+ background_filter = add_path(input('path image : '), 'input_background/')
+ backgroundImage(type_of_background, path_input_image,color, background_filter)
+ if type_of_background=='color':
+ add_color = input('input color rgb : 0,0,255 : ')
+ color = tuple(map(int, add_color.split(',')))
+
+ if type_filter == 1 :
+        backgroundImage(type_of_background,path_input_image,color,'../assest/background/back1.png')
+ if type_filter == 2 :
+        backgroundImage(type_of_background,path_input_image,color,'../assest/background/back2.png')
+filter_background_image()
\ No newline at end of file
diff --git a/background_live/__pycache__/background_live.cpython-39.pyc b/background_live/__pycache__/background_live.cpython-39.pyc
new file mode 100644
index 0000000..4c5d07d
Binary files /dev/null and b/background_live/__pycache__/background_live.cpython-39.pyc differ
diff --git a/background_live/__pycache__/request_image.cpython-39.pyc b/background_live/__pycache__/request_image.cpython-39.pyc
new file mode 100644
index 0000000..4d607f7
Binary files /dev/null and b/background_live/__pycache__/request_image.cpython-39.pyc differ
diff --git a/background_live/background_live.py b/background_live/background_live.py
new file mode 100644
index 0000000..3dbee72
--- /dev/null
+++ b/background_live/background_live.py
@@ -0,0 +1,82 @@
+import cv2
+import mediapipe as mp
+import numpy as np
+from request_image import add_path
+import os
+def background(path,blur =1,img=2):
+ mp_selfie_segmentation = mp.solutions.selfie_segmentation
+ back1=1
+ cam = cv2.VideoCapture(0)
+ cam.set(3, 1280)
+ cam.set(4, 720)
+ fsize = (520, 720)
+
+
+
+ # begin with selfie segmentation model
+
+ img_name = ""
+ while cam.isOpened():
+ scene = cv2.imread(path) # read the scene image
+ scene = cv2.blur(scene, (blur, blur))
+ scene = cv2.resize(scene, (fsize[1], fsize[0])) # resize scene to the size of frame
+ with mp_selfie_segmentation.SelfieSegmentation(model_selection=1) as selfie_seg:
+ bg_image = scene
+ ret, frame = cam.read()
+ if not ret:
+ print("Error reading frame...")
+ continue
+ frame = cv2.resize(frame, (fsize[1], fsize[0]))
+
+ # flip it to look like selfie camera
+ frame = cv2.flip(frame, 1)
+
+ # get rgb image to pass that on selfie segmentation
+ rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+ # process it!
+ results = selfie_seg.process(rgb)
+
+ # get the condition from result's segmentation mask
+ condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1
+
+ # apply background change if condition matches
+ output_image = np.where(condition, frame, bg_image)
+
+ # show the output
+ cv2.imshow('Background Change with MP', output_image)
+ key = cv2.waitKey(5) & 0xFF
+
+ if key == ord('n'):
+ back1+=1
+ if back1 ==img+1:
+ back1=1
+ path = f'../assest/background/back{back1}.png'
+ if key == ord('a'):
+ path = add_path(input('path image : '))
+ # wait until any key is pressed
+
+ if key == ord('q'):
+ cam.release()
+            cv2.destroyAllWindows(); return  # exit the capture loop cleanly on 'q'
+ elif key == ord("c"):
+ img_name = "../saved/opencv_frame.png"
+ cv2.imwrite(img_name, output_image)
+ # print("{} written!".format(img_name))
+ break
+ frames = cv2.imread(img_name)
+ cv2.imshow("Background Change with MP", frames)
+ key = cv2.waitKey(0)
+ os.remove("../saved/opencv_frame.png")
+ if key == ord("s"):
+ user_name = input("enter name")
+ imgdir = f"../saved/{user_name}.png"
+ cv2.imwrite(imgdir, frames)
+ background('../assest/background/back1.png',blur,img)
+ if key == ord("e"):
+ background('../assest/background/back1.png',blur,img)
+ cam.release()
+ cv2.destroyAllWindows()
+
+
+
diff --git a/background_live/filter_background_live.py b/background_live/filter_background_live.py
new file mode 100644
index 0000000..b002c31
--- /dev/null
+++ b/background_live/filter_background_live.py
@@ -0,0 +1,13 @@
+from background_live import background
+from request_image import add_path,check_image
+from os.path import exists
+
+type_filter = int(input('type_filter : '))
+type_blur=int(input('number for blur start from 1 : '))
+if type_filter == 0:
+ path,img = add_path(input('path image : '))
+ background('../assest/background/back1.png', type_blur,img)
+if type_filter ==1:
+ img =check_image()
+ print(img)
+ background('../assest/background/back1.png',type_blur,img)
diff --git a/background_live/request_image.py b/background_live/request_image.py
new file mode 100644
index 0000000..1e548ca
--- /dev/null
+++ b/background_live/request_image.py
@@ -0,0 +1,27 @@
+import requests
+from os.path import exists
+def add_path(image_path):
+ r= requests.get(image_path)
+ img =1
+ path_image = f'../assest/background/back{img}.png'
+ file_exists = exists(path_image)
+ while file_exists :
+ img+=1
+ path_image = f'../assest/background/back{img}.png'
+ file_exists = exists(path_image)
+
+ with open(path_image,'wb') as f :
+ f.write(r.content)
+
+ return path_image,img
+
+def check_image():
+ img = 1
+ path_image = f'../assest/background/back{img}.png'
+ file_exists = exists(path_image)
+ while file_exists:
+ img += 1
+ path_image = f'../assest/background/back{img}.png'
+ file_exists = exists(path_image)
+ return img-1
+
diff --git a/filters/__pycache__/dogfilter.cpython-38.pyc b/filters/__pycache__/dogfilter.cpython-38.pyc
deleted file mode 100644
index 4f50e2f..0000000
Binary files a/filters/__pycache__/dogfilter.cpython-38.pyc and /dev/null differ
diff --git a/filters/__pycache__/dogfilter.cpython-39.pyc b/filters/__pycache__/dogfilter.cpython-39.pyc
deleted file mode 100644
index 7d42965..0000000
Binary files a/filters/__pycache__/dogfilter.cpython-39.pyc and /dev/null differ
diff --git a/filters/__pycache__/filter1.cpython-38.pyc b/filters/__pycache__/filter1.cpython-38.pyc
deleted file mode 100644
index 43ce4ed..0000000
Binary files a/filters/__pycache__/filter1.cpython-38.pyc and /dev/null differ
diff --git a/filters/__pycache__/filter1.cpython-39.pyc b/filters/__pycache__/filter1.cpython-39.pyc
deleted file mode 100644
index dfa6a86..0000000
Binary files a/filters/__pycache__/filter1.cpython-39.pyc and /dev/null differ
diff --git a/filters/__pycache__/glasses_black.cpython-39.pyc b/filters/__pycache__/glasses_black.cpython-39.pyc
deleted file mode 100644
index aff36c7..0000000
Binary files a/filters/__pycache__/glasses_black.cpython-39.pyc and /dev/null differ
diff --git a/filters/__pycache__/gost.cpython-39.pyc b/filters/__pycache__/gost.cpython-39.pyc
deleted file mode 100644
index ce4d03b..0000000
Binary files a/filters/__pycache__/gost.cpython-39.pyc and /dev/null differ
diff --git a/filters/__pycache__/mustache_filter.cpython-39.pyc b/filters/__pycache__/mustache_filter.cpython-39.pyc
deleted file mode 100644
index 8cdfbd3..0000000
Binary files a/filters/__pycache__/mustache_filter.cpython-39.pyc and /dev/null differ
diff --git a/filters/__pycache__/mustache_filter_2.cpython-39.pyc b/filters/__pycache__/mustache_filter_2.cpython-39.pyc
deleted file mode 100644
index 4289c9b..0000000
Binary files a/filters/__pycache__/mustache_filter_2.cpython-39.pyc and /dev/null differ
diff --git a/filters/cat-filter.py b/filters/cat-filter.py
deleted file mode 100644
index 7362ab5..0000000
--- a/filters/cat-filter.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import cv2
-import numpy as np
-import dlib
-from math import hypot
-filter_image = cv2.imread("assest/cat-ears.png")
-filter_image3 = cv2.imread("assest/cat-nose.png")
-# filter_image4 = cv2.imread("assest\eye1.png")
-# filter_image5 = cv2.imread("assest\eye2.png")
-
-
-# Loading Face detector
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat")
-
-def filteringmouse(cap,rows, cols):
- filter1 = np.zeros((rows, cols), np.uint8)
- _, frame = cap.read()
- filter1.fill(0)
- gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- faces = detector(frame)
-
- # if faces:
- try:
- filter(frame,gray_frame,faces,filter_image,27,27,1.7,0.5,120)
- filter(frame, gray_frame, faces, filter_image3,28,28,1.1,1,0)
-
- # filter(frame, gray_frame, faces, filter_image3,27,27,1.5,1,100)
- # filter(frame, gray_frame, faces, filter_image4,40,40,0.2,0.4,5)
- # filter(frame, gray_frame, faces, filter_image5,46,46,0.2,0.4,5)
-
-
-
- except:
- _, frame_f = cap.read()
- cv2.imshow("Frame", frame_f)
- # else:
- # _, frame_f = cap.read()
- # cv2.imshow("Frame", frame_f)
-def filter(frame,gray_frame,faces,filter_image1,X,Y,width,height,above=0,left=0):
- for face in faces:
- landmarks = predictor(gray_frame, face)
-
- # filter coordinates
- # top_filter = (landmarks.part(27).x+10, landmarks.part(24).y+10)
- center_filter = (landmarks.part(X).x-left, landmarks.part(Y).y-above)
- left_filter = (landmarks.part(4).x, landmarks.part(4).y)
- right_filter = (landmarks.part(14).x, landmarks.part(14).y)
-
- filter_width = int(hypot(left_filter[0] - right_filter[0],
- left_filter[1] - right_filter[1]) * width)
- filter_height = int(filter_width * height)
-
- # New filter position
- top_left = (int(center_filter[0] - filter_width / 2),
- int(center_filter[1] - filter_height / 2))
- bottom_right = (int(center_filter[0] + filter_width / 2),
- int(center_filter[1] + filter_height / 2))
-
- # Adding the new filter
- # coloring
- filtery = cv2.resize(filter_image1, (filter_width, filter_height))
- filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
- _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)
-
- filter_area = frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width]
- filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
- final_filter = cv2.add(filter_area_no_filter, filtery)
-
- frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width,:] = final_filter
-
- cv2.imshow("Frame", frame)
\ No newline at end of file
diff --git a/filters/dogfilter.py b/filters/dogfilter.py
deleted file mode 100644
index 964de52..0000000
--- a/filters/dogfilter.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import cv2
-import numpy as np
-import dlib
-from math import hypot
-filter_image = cv2.imread("assest\dogfilter.png")
-# Loading Face detector
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat")
-
-def filteringdog(cap,rows, cols):
- filter1 = np.zeros((rows, cols), np.uint8)
- _, frame = cap.read()
- filter1.fill(0)
- gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- faces = detector(frame)
- # if faces:
- try:
-
- for face in faces:
- landmarks = predictor(gray_frame, face)
-
- # filter coordinates
- # top_filter = (landmarks.part(27).x, landmarks.part(24).y)
- center_filter = (landmarks.part(27).x, landmarks.part(27).y)
- left_filter = (landmarks.part(2).x, landmarks.part(2).y)
- right_filter = (landmarks.part(14).x, landmarks.part(14).y)
-
- filter_width = int(hypot(left_filter[0] - right_filter[0],
- left_filter[1] - right_filter[1]) * 1.3)
- filter_height = int(filter_width *1.5)
-
- # New filter position
- top_left = (int(center_filter[0] - filter_width / 2),
- int(center_filter[1] - filter_height / 2))
- bottom_right = (int(center_filter[0] + filter_width / 2),
- int(center_filter[1] + filter_height / 2))
-
- # Adding the new filter
- # coloring
- filtery = cv2.resize(filter_image, (filter_width, filter_height))
- filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
- _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)
-
- filter_area = frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width]
- filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
- final_filter = cv2.add(filter_area_no_filter, filtery)
-
- frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width] = final_filter
-
- cv2.imshow("Frame", frame)
- except:
- _, frame_f = cap.read()
- cv2.imshow("Frame", frame_f)
- # else:
- # _, frame_f = cap.read()
- # cv2.imshow("Frame", frame_f)
diff --git a/filters/face_filter_crown.py b/filters/face_filter_crown.py
deleted file mode 100644
index d2ca543..0000000
--- a/filters/face_filter_crown.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# import cv2
-# import numpy as np
-# import dlib
-# from math import hypot
-# filter_image = cv2.imread("../assest/flower-crown-png-42606.png")
-#
-# # Loading Face detector
-# detector = dlib.get_frontal_face_detector()
-# predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
-#
-# cap = cv2.VideoCapture(0)
-#
-# while True :
-# _, frame = cap.read()
-# gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-# faces = detector(frame)
-# for face in faces :
-# landmarks = predictor(gray_frame, face)
-# top_glasses = (landmarks.part(24).x , landmarks.part(24).y)
-# left_glasses = (landmarks.part(0).x , landmarks.part(0).y)
-# right_glasses = (landmarks.part(16).x , landmarks.part(16).y)
-# center_glasses = (landmarks.part(27).x , landmarks.part(27).y)
-#
-# glasses_width = int (hypot(left_glasses[0] -right_glasses[0],
-# left_glasses[1] - right_glasses[1]))
-#
-# glasses_height = int(glasses_width * 0.6)
-#
-# # positios
-# upper_left = (int(center_glasses[0] - glasses_width/2 ),
-# int(center_glasses[1] - glasses_height/2))
-# lower_right = (int(center_glasses[0] + glasses_width / 2),
-# int(center_glasses[1] + glasses_height / 2))
-#
-# # Adding the glasses in the correct position
-# glasses = cv2.resize(filter_image,(glasses_width , glasses_height))
-# gray_glasses = cv2.cvtColor(glasses, cv2.COLOR_BGR2GRAY)
-#
-#
-# _, glasses_mask = cv2.threshold(gray_glasses,120, 225, cv2.THRESH_BINARY_INV)
-# glasses_area = frame[upper_left[1] : upper_left[1]+glasses_height , upper_left[0]:upper_left[0]+glasses_width]
-# glasses_ares_no_glasses = cv2.bitwise_and(glasses_area , glasses_area,mask= glasses_mask)
-# final_glasses = cv2.add(glasses_ares_no_glasses, glasses)
-# frame[upper_left[1]: upper_left[1] + glasses_height, upper_left[0]:upper_left[0] + glasses_width] = final_glasses
-#
-# cv2.imshow("Frame",frame)
-#
-#
-#
-# key = cv2.waitKey(1)
-# if key ==27 :
-# break
\ No newline at end of file
diff --git a/filters/filter1.py b/filters/filter1.py
deleted file mode 100644
index 0946dc0..0000000
--- a/filters/filter1.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import cv2
-import numpy as np
-import dlib
-from math import hypot
-
-filter_image = cv2.imread("assest\image1.png")
-# Loading Face detector
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat")
-
-def filtering(cap,rows, cols):
- filter1 = np.zeros((rows, cols), np.uint8)
- _, frame = cap.read()
- filter1.fill(0)
- gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- faces = detector(frame)
- if faces:
- try:
-
- for face in faces:
- landmarks = predictor(gray_frame, face)
-
- # filter coordinates
- # top_filter = (landmarks.part(27).x, landmarks.part(24).y)
- center_filter = (landmarks.part(32).x, landmarks.part(24).y)
- left_filter = (landmarks.part(3).x, landmarks.part(2).y)
- right_filter = (landmarks.part(13).x, landmarks.part(13).y)
-
- filter_width = int(hypot(left_filter[0] - right_filter[0],
- left_filter[1] - right_filter[1]) * 1.7)
- filter_height = int(filter_width *1.1)
-
- # New filter position
- top_left = (int(center_filter[0] - filter_width / 2),
- int(center_filter[1] - filter_height / 2))
- bottom_right = (int(center_filter[0] + filter_width / 2),
- int(center_filter[1] + filter_height / 2))
-
- # Adding the new filter
- # coloring
- filtery = cv2.resize(filter_image, (filter_width, filter_height))
- filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
- _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)
-
- filter_area = frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width]
- filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
- final_filter = cv2.add(filter_area_no_filter, filtery)
-
- frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width] = final_filter
-
- cv2.imshow("Frame", frame)
- except:
- _, frame_f = cap.read()
- cv2.imshow("Frame", frame_f)
- else:
- _, frame_f = cap.read()
- cv2.imshow("Frame", frame_f)
diff --git a/filters/head_crown.py b/filters/head_crown.py
deleted file mode 100644
index 4bf6fa2..0000000
--- a/filters/head_crown.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import cv2
-import numpy as np
-import dlib
-from math import hypot
-filter_image = cv2.imread("assest/flower-crown-png-42606.png")
-
-# Loading Face detector
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat")
-
-def filteringmouse(cap,rows, cols):
- filter1 = np.zeros((rows, cols), np.uint8)
- _, frame = cap.read()
- filter1.fill(0)
- gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- faces = detector(frame)
-
- # if faces:
- try:
- filter(frame,gray_frame,faces,filter_image,27,27,1.2,0.6,20)
- # filter(frame, gray_frame, faces, filter_image3,27,27,1.5,2,25)
- except:
- _, frame_f = cap.read()
- cv2.imshow("Frame", frame_f)
- # else:
- # _, frame_f = cap.read()
- # cv2.imshow("Frame", frame_f)
-def filter(frame,gray_frame,faces,filter_image1,X,Y,width,height,above=0,left=0):
- for face in faces:
- landmarks = predictor(gray_frame, face)
-
- # filter coordinates
- # top_filter = (landmarks.part(27).x+10, landmarks.part(24).y+10)
- center_filter = (landmarks.part(X).x-left, landmarks.part(Y).y-above)
- left_filter = (landmarks.part(4).x, landmarks.part(4).y)
- right_filter = (landmarks.part(14).x, landmarks.part(14).y)
-
- filter_width = int(hypot(left_filter[0] - right_filter[0],
- left_filter[1] - right_filter[1]) * width)
- filter_height = int(filter_width * height)
-
- # New filter position
- top_left = (int(center_filter[0] - filter_width / 2),
- int(center_filter[1] - filter_height ))
- bottom_right = (int(center_filter[0] + filter_width / 2),
- int(center_filter[1] + filter_height / 2))
-
- # Adding the new filter
- # coloring
- filtery = cv2.resize(filter_image1, (filter_width, filter_height))
- filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
- _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)
-
- filter_area = frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width]
- filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
- final_filter = cv2.add(filter_area_no_filter, filtery)
-
- frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width,:] = final_filter
-
- cv2.imshow("Frame", frame)
\ No newline at end of file
diff --git a/filters/mustache_filter_2.py b/filters/mustache_filter_2.py
deleted file mode 100644
index 130ea16..0000000
--- a/filters/mustache_filter_2.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import cv2
-import numpy as np
-import dlib
-from math import hypot
-filter_image = cv2.imread("assest/10-2-moustache-free-png-image.png")
-
-# Loading Face detector
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat")
-
-def filteringmouse(cap,rows, cols):
- filter1 = np.zeros((rows, cols), np.uint8)
- _, frame = cap.read()
- filter1.fill(0)
- gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- faces = detector(frame)
-
- # if faces:
- try:
- filter(frame,gray_frame,faces,filter_image,51,51,1,0.5,1,-3)
- # filter(frame, gray_frame, faces, filter_image3,27,27,1.5,2,25)
- except:
- _, frame_f = cap.read()
- cv2.imshow("Frame", frame_f)
- # else:
- # _, frame_f = cap.read()
- # cv2.imshow("Frame", frame_f)
-def filter(frame,gray_frame,faces,filter_image1,X,Y,width,height,above=0,left=0):
- for face in faces:
- landmarks = predictor(gray_frame, face)
-
- # filter coordinates
- # top_filter = (landmarks.part(27).x+10, landmarks.part(24).y+10)
- center_filter = (landmarks.part(X).x-left, landmarks.part(Y).y-above)
- left_filter = (landmarks.part(4).x, landmarks.part(4).y)
- right_filter = (landmarks.part(12).x, landmarks.part(12).y)
-
- filter_width = int(hypot(left_filter[0] - right_filter[0],
- left_filter[1] - right_filter[1]) * width)
- filter_height = int(filter_width * height)
-
- # New filter position
- top_left = (int(center_filter[0] - filter_width / 2),
- int(center_filter[1] - filter_height /2 ))
- bottom_right = (int(center_filter[0] + filter_width / 2),
- int(center_filter[1] + filter_height / 2))
-
- # Adding the new filter
- # coloring
- filtery = cv2.resize(filter_image1, (filter_width, filter_height))
- filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
- _, filter1 = cv2.threshold(filtery_gray, 25, 225, cv2.THRESH_BINARY_INV)
-
- filter_area = frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width]
- filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
- final_filter = cv2.add(filter_area_no_filter, filtery)
-
- frame[top_left[1]: top_left[1] + filter_height,
- top_left[0]: top_left[0] + filter_width,:] = final_filter
-
- cv2.imshow("Frame", frame)
\ No newline at end of file
diff --git a/filters_image/image_filtering_face.py b/filters_image/image_filtering_face.py
new file mode 100644
index 0000000..85353b8
--- /dev/null
+++ b/filters_image/image_filtering_face.py
@@ -0,0 +1,104 @@
+import cv2
+import numpy as np
+import dlib
+from math import hypot
+from filters_live.change_filter import change_filter
+import os
+
+# Loading Face detector
+
+def image_filtering_face(path_filter,path_image,center,width,height,up,left,counte=0):
+ # path = r"../assest/moustache2.png"
+ filter_image = []
+ for i in path_filter:
+ filter_image.append(cv2.imread(i))
+ image = cv2.imread(path_image)
+ rows, cols, _ = image.shape
+ filter1 = np.zeros((rows, cols), np.uint8)
+ filter1.fill(0)
+ gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ detector = dlib.get_frontal_face_detector()
+ faces = detector(image)
+ if faces:
+ try:
+ for i in range(len(path_filter)):
+ filter(image,gray_image,faces,filter_image[i],center[i],width[i],height[i],up[i],left[i])
+ except:
+ image = cv2.imread(path_image)
+ cv2.imshow("Frame", image)
+ else:
+ image = cv2.imread(path_image)
+ cv2.imshow("Frame", image)
+ key = cv2.waitKey(0)
+ if key == ord('n'):
+ change_image(counte,path_image)
+ elif key == ord('q'):
+ cv2.destroyAllWindows()
+ # elif key == ord("c"):
+ # img_name = "../saved/opencv_frame.png"
+ # cv2.imwrite(img_name, image)
+ # print("{} written!".format(img_name))
+ #
+ # image = cv2.imread(img_name)
+ # cv2.imshow("Frame", image)
+ # key = cv2.waitKey(0)
+ # os.remove("../saved/opencv_frame.png")
+ if key == ord("s"):
+ user_name = input("enter name")
+ imgdir = f"../saved/{user_name}.png"
+ cv2.imwrite(imgdir, image)
+ image_filtering_face(["../assest/tongue.png"],"../assest/face.jpg",[57],[0.6],[1.2],[-25],[0])
+ if key == ord("e"):
+ image_filtering_face(["../assest/tongue.png"],"../assest/face.jpg",[57],[0.6],[1.2],[-25],[0])
+
+
+
+def filter(image,gray_frame,faces,filter_image1,center,width,height,up=0,left=0):
+ predictor_path = r"../assest/shape_predictor_68_face_landmarks.dat"
+ predictor = dlib.shape_predictor(predictor_path)
+
+ for face in faces:
+ landmarks = predictor(gray_frame, face)
+
+ center_filter = (landmarks.part(center).x-left, landmarks.part(center).y-up)
+ left_filter = (landmarks.part(4).x, landmarks.part(4).y)
+ right_filter = (landmarks.part(14).x, landmarks.part(14).y)
+
+ filter_width = int(hypot(left_filter[0] - right_filter[0],
+ left_filter[1] - right_filter[1]) * width)
+ filter_height = int(filter_width * height)
+
+ # New filter position
+ top_left = (int(center_filter[0] - filter_width / 2),
+ int(center_filter[1] - filter_height / 2))
+ bottom_right = (int(center_filter[0] + filter_width / 2),
+ int(center_filter[1] + filter_height / 2))
+
+ # Adding the new filter
+ filtery = cv2.resize(filter_image1, (filter_width, filter_height))
+ filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
+ _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)
+
+ filter_area = image[top_left[1]: top_left[1] + filter_height,
+ top_left[0]: top_left[0] + filter_width]
+ filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
+ final_filter = cv2.add(filter_area_no_filter, filtery)
+
+ image[top_left[1]: top_left[1] + filter_height,
+ top_left[0]: top_left[0] + filter_width,:] = final_filter
+
+ cv2.imshow("Frame", image)
+
+def change_image(i,path_image):
+ image_filtering_face(
+ change_filter[i]['filter'],
+ path_image,
+ change_filter[i]['center'],
+ change_filter[i]['width'],
+ change_filter[i]['height'],
+ change_filter[i]['up'],
+ change_filter[i]['left'],
+ change_filter[i]['counte']
+ )
+if __name__ == "__main__":
+ image_filtering_face(["../assest/tongue.png"],"../assest/face.jpg",[57],[0.6],[1.2],[-25],[0])
\ No newline at end of file
diff --git a/filters_live/change_filter.py b/filters_live/change_filter.py
new file mode 100644
index 0000000..2285361
--- /dev/null
+++ b/filters_live/change_filter.py
@@ -0,0 +1,69 @@
## Preset data (JSON-like) describing the available filter combinations.
+
# Each preset drives one run of the filtering loop. The list-valued keys are
# parallel, one entry per overlay image:
#   filter : paths of the overlay images
#   center : facial-landmark index (0-67) each overlay is centred on
#   width  : scale factor applied to the detected jaw width
#   height : scale factor applied to the resulting overlay width
#   up     : pixels the overlay is shifted upward
#   left   : pixels the overlay is shifted leftward
#   counte : index of the NEXT preset to load when the user cycles filters
#            (presumably — confirm against the change_image callers)
change_filter =[{'filter':['../assest/hair9.png','../assest/ghoul2.png','../assest/eye1.png','../assest/eye2.png'],
                 'center':[27,66,40,46],
                 'width':[1.5,1,0.2,0.2],
                 'height':[1,1,0.4,0.4],
                 'up':[100,20,5,5],
                 'left':[0,0,0,0],
                 'counte':1},
                {'filter': ["../assest/birthday-hat.png"],
                 'center': [27],
                 'width': [1.3],
                 'height': [1],
                 'up': [120],
                 'left': [0],
                 'counte':2},
                {'filter': ["../assest/gold-crown.png"],
                 'center': [27],
                 'width': [1.3],
                 'height': [1],
                 'up': [120],
                 'left': [0],
                 'counte':3},
                {'filter': ["../assest/queens-crown.png"],
                 'center': [27],
                 'width': [1.3],
                 'height': [1],
                 'up': [120],
                 'left': [0],
                 'counte':4},
                {'filter': ["../assest/flower-crown.png"],
                 'center': [27],
                 'width': [1.3],
                 'height': [1],
                 'up': [80],
                 'left': [0],
                 'counte': 5},
                {'filter': ["../assest/funny-sunglass.png"],
                 'center': [27],
                 'width': [1.05],
                 'height': [0.33],
                 'up': [0],
                 'left': [0],
                 'counte': 6},
                {'filter': ['../assest/moustache2.png'],
                 'center': [51],
                 'width': [1],
                 'height': [0.5],
                 'up': [1],
                 'left': [-3],
                 'counte': 7},
                {'filter': ["../assest/beard.png"],
                 'center': [8],
                 'width': [1.1],
                 'height': [1.5],
                 'up': [0],
                 'left': [0],
                 'counte': 8},
                {'filter': ["../assest/tongue.png"],
                 'center': [57],
                 'width': [0.6],
                 'height': [1.2],
                 'up': [-25],
                 'left': [0],
                 'counte': 0},


                ]
+
diff --git a/filters/glasses_black.py b/filters_live/glasses_black.py
similarity index 100%
rename from filters/glasses_black.py
rename to filters_live/glasses_black.py
diff --git a/filters/gost.py b/filters_live/gost.py
similarity index 89%
rename from filters/gost.py
rename to filters_live/gost.py
index 02e84a6..ac67b37 100644
--- a/filters/gost.py
+++ b/filters_live/gost.py
@@ -2,7 +2,7 @@
import numpy as np
import dlib
from math import hypot
-filter_image = cv2.imread("assest/10-2-moustache-free-png-image.png")
+filter_image = cv2.imread("assest/moustache2.png")
# Loading Face detector
detector = dlib.get_frontal_face_detector()
diff --git a/filters/mustache_filter.py b/filters_live/mustache_filter.py
similarity index 100%
rename from filters/mustache_filter.py
rename to filters_live/mustache_filter.py
diff --git a/filters/shape_predictor_68_face_landmarks.dat b/filters_live/shape_predictor_68_face_landmarks.dat
similarity index 100%
rename from filters/shape_predictor_68_face_landmarks.dat
rename to filters_live/shape_predictor_68_face_landmarks.dat
diff --git a/filters_live/video_filtering_face.py b/filters_live/video_filtering_face.py
new file mode 100644
index 0000000..979e2a9
--- /dev/null
+++ b/filters_live/video_filtering_face.py
@@ -0,0 +1,115 @@
+import cv2
+import numpy as np
+import dlib
+from math import hypot
+from change_filter import change_filter
+import os
+# change_filter =[{'filter':['../assest/hair9.png','../assest/ghoul2.png'],'center':[27,66],'width':[1.5,1],'height':[1,1],'up':[100,20],'left':[0,0]}]
# Open the default webcam and grab one frame to learn the stream resolution.
cap = cv2.VideoCapture(0)
_, frame = cap.read()
rows, cols, _ = frame.shape
# Scratch single-channel mask sized to the frame (rebuilt each loop pass).
filter1 = np.zeros((rows, cols), np.uint8)
# dlib face detector + 68-point landmark model (path relative to this script).
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Index of the currently active preset in change_filter.
counte=0
def video_filtering_face(path, center, width, height, up, left, counte=0):
    """Run the live webcam filter loop until the user quits.

    Parameters mirror one entry of ``change_filter``:
    path   : list of overlay-image paths, one per overlay.
    center : landmark index each overlay is centred on.
    width, height : per-overlay scale factors.
    up, left : per-overlay pixel offsets.
    counte : index of the next preset to switch to when 'n' is pressed.

    Keys: n = next filter, q = quit, c = capture the current frame
    (then s = save under a chosen name and resume, e = just resume).
    """
    filter_image = []
    for i in path:
        filter_image.append(cv2.imread(i))

    while cap.isOpened():
        filter1 = np.zeros((rows, cols), np.uint8)

        _, frame = cap.read()
        filter1.fill(0)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(frame)
        if faces:
            try:
                for i in range(len(path)):
                    filter(frame, gray_frame, faces, filter_image[i], center[i], width[i], height[i], up[i], left[i])
            except Exception:
                # Overlaying can fail near the frame border (negative or
                # oversized slice); show the raw frame for this tick instead.
                # Narrowed from a bare except so Ctrl-C still works.
                _, frame = cap.read()
                cv2.imshow("Frame", frame)
        else:
            _, frame = cap.read()
            cv2.imshow("Frame", frame)
        key = cv2.waitKey(1)
        if key == ord('n'):
            # Switch to the next preset (NOTE: recursive call — each switch
            # adds a stack frame).
            change_image(counte)
        elif key == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            # BUG FIX: return here. Previously execution fell out of the
            # loop (cap no longer opened) into the capture-review code
            # below, which raised NameError because img_name was unbound.
            return
        elif key == ord("c"):
            img_name = "../saved/opencv_frame.png"
            cv2.imwrite(img_name, frame)
            break

    # Reached via the 'c' branch: review the captured frame, optionally save
    # it under a user-chosen name, then restart the loop.
    frames = cv2.imread(img_name)
    cv2.imshow("Frame", frames)
    key = cv2.waitKey(0)
    os.remove("../saved/opencv_frame.png")
    if key == ord("s"):
        user_name = input("enter name")
        imgdir = f"../saved/{user_name}.png"
        cv2.imwrite(imgdir, frames)
        video_filtering_face(["../assest/tongue.png"], [57], [0.6], [1.2], [-25], [0])
    if key == ord("e"):
        video_filtering_face(["../assest/tongue.png"], [57], [0.6], [1.2], [-25], [0])
+
+
+
def filter(frame, gray_frame, faces, filter_image1, center, width, height, up=0, left=0):
    """Overlay one filter graphic onto every detected face in *frame* (in place).

    frame : BGR webcam frame that is drawn onto and displayed.
    gray_frame : grayscale copy used for landmark detection.
    faces : dlib face rectangles.
    filter_image1 : BGR filter graphic; near-black pixels act as transparent.
    center : facial-landmark index (0-67) the filter is centred on.
    width, height : scale factors — *width* multiplies the jaw span
        (landmarks 4 -> 14), *height* multiplies the resulting filter width.
    up, left : pixel offsets subtracted from the centre landmark.
    """
    for face in faces:
        landmarks = predictor(gray_frame, face)

        center_filter = (landmarks.part(center).x - left, landmarks.part(center).y - up)
        left_filter = (landmarks.part(4).x, landmarks.part(4).y)
        right_filter = (landmarks.part(14).x, landmarks.part(14).y)

        # Size the overlay relative to the distance across the jaw.
        filter_width = int(hypot(left_filter[0] - right_filter[0],
                                 left_filter[1] - right_filter[1]) * width)
        filter_height = int(filter_width * height)

        # Top-left corner of the region the overlay occupies.
        # (The previously computed bottom-right corner was unused; removed.)
        top_left = (int(center_filter[0] - filter_width / 2),
                    int(center_filter[1] - filter_height / 2))

        # Mask: overlay pixels darker than 25 are background and keep the
        # original frame content underneath.
        filtery = cv2.resize(filter_image1, (filter_width, filter_height))
        filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
        _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)

        filter_area = frame[top_left[1]: top_left[1] + filter_height,
                            top_left[0]: top_left[0] + filter_width]
        filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1)
        final_filter = cv2.add(filter_area_no_filter, filtery)

        frame[top_left[1]: top_left[1] + filter_height,
              top_left[0]: top_left[0] + filter_width, :] = final_filter

    cv2.imshow("Frame", frame)
+
def change_image(i):
    """Restart the live loop with preset *i* from ``change_filter``."""
    preset = change_filter[i]
    video_filtering_face(
        preset['filter'],
        preset['center'],
        preset['width'],
        preset['height'],
        preset['up'],
        preset['left'],
        preset['counte'],
    )
if __name__ == "__main__":
    # Start the live loop with the tongue filter as the initial preset.
    video_filtering_face(["../assest/tongue.png"],[57],[0.6],[1.2],[-25],[0])
\ No newline at end of file
diff --git a/main.py b/main.py
index 8fa7f38..e69de29 100644
--- a/main.py
+++ b/main.py
@@ -1,17 +0,0 @@
-import cv2
-import numpy as np
-
-import dlib
-from filters.filter1 import filtering
-from filters.dogfilter import filteringdog
-# Loading Camera and Nose image and Creating mask
-cap = cv2.VideoCapture(0)
-_, frame = cap.read()
-# print(frame)
-rows, cols, _ = frame.shape
-filter1 = np.zeros((rows, cols), np.uint8)
-while True:
- filteringdog(cap,rows, cols)
- key = cv2.waitKey(1)
- if key == 27:
- break
diff --git a/saved/ yaseenmalkalbacgrond.png b/saved/ yaseenmalkalbacgrond.png
new file mode 100644
index 0000000..cbbc30f
Binary files /dev/null and b/saved/ yaseenmalkalbacgrond.png differ
diff --git a/saved/ yousef.png b/saved/ yousef.png
new file mode 100644
index 0000000..a797270
Binary files /dev/null and b/saved/ yousef.png differ
diff --git a/saved/11.png b/saved/11.png
new file mode 100644
index 0000000..35d2d4b
Binary files /dev/null and b/saved/11.png differ
diff --git a/saved/hiss.png b/saved/hiss.png
new file mode 100644
index 0000000..adc0492
Binary files /dev/null and b/saved/hiss.png differ
diff --git a/saved/opencv_frame32.png b/saved/opencv_frame32.png
new file mode 100644
index 0000000..9e90a3f
Binary files /dev/null and b/saved/opencv_frame32.png differ
diff --git a/saved/sss.png b/saved/sss.png
new file mode 100644
index 0000000..3113fb4
Binary files /dev/null and b/saved/sss.png differ
diff --git a/saved/yahia.png b/saved/yahia.png
new file mode 100644
index 0000000..a0a61f3
Binary files /dev/null and b/saved/yahia.png differ
diff --git a/saved/yahia2.png b/saved/yahia2.png
new file mode 100644
index 0000000..124fd6b
Binary files /dev/null and b/saved/yahia2.png differ
diff --git a/saved/yaseen.png b/saved/yaseen.png
new file mode 100644
index 0000000..302b72a
Binary files /dev/null and b/saved/yaseen.png differ