"""Simple Tk file-picker GUI (Tkinter/GUI.py): browse for images and open them
with the platform's default application."""
import tkinter as tk
from tkinter import filedialog, Text
import os, sys, subprocess
# BUG FIX: `from main import cap` was removed — importing main executes its
# top-level webcam capture loop as a side effect, blocking this GUI at import
# time. Re-export `cap` from a side-effect-free module if it is truly needed.
from PIL import Image

root = tk.Tk()
# Paths of the image files the user has picked via the Browse button.
images = []


def addApp():
    """Ask the user for an image file, remember it, and re-list all picks."""
    # Clear the previous listing before re-rendering.
    for widget in frame.winfo_children():
        widget.destroy()

    filename = filedialog.askopenfilename(
        initialdir="/home/yousef",
        title="select File",
        filetypes=(("executables", "*.png"), ("all files", "*.*")),
    )
    # FIX: askopenfilename returns "" when the dialog is cancelled; the
    # original appended that empty path and later tried to "open" it.
    if filename:
        images.append(filename)
    for img in images:
        label = tk.Label(frame, text=img, bg="gray")
        label.pack()


def runApps():
    """Open every remembered file with the platform's default application."""
    for img in images:
        if sys.platform == "win32":
            os.startfile(img)
        else:
            # macOS uses `open`; other POSIX desktops use `xdg-open`.
            opener = "open" if sys.platform == "darwin" else "xdg-open"
            subprocess.call([opener, img])


# BUG FIX: the option was misspelled `heigh=800`; Tk happened to accept the
# abbreviation, but the canonical option name is `height`.
canvas = tk.Canvas(root, width=1000, height=800, bg="#2596be")
canvas.pack()
frame = tk.Frame(root, bg="white")
frame.place(relwidth=0.8, relheight=0.8, relx=0.1, rely=0.1)

openFile = tk.Button(text="Browse", padx=18, pady=5, fg="white", bg="#2596be",
                     command=addApp)
openFile.pack(side=tk.BOTTOM)

secondButton = tk.Button(text="Run App", padx=18, pady=5, fg="white",
                         bg="#2596be", command=runApps)
secondButton.pack(side=tk.BOTTOM)

root.mainloop()
"""Tkinter webcam viewer with a snapshot button (Tkinter/snapshot.py)."""
from tkinter import *
import cv2
from PIL import Image, ImageTk
import time

# NOTE(review): the PyQt5 imports were only referenced by commented-out sound
# playback code; importing them forced an unused heavy dependency, so they are
# disabled here.
# from PyQt5.QtMultimedia import *
# from PyQt5.QtCore import QUrl


class App:
    """Main window: shows the live camera feed and saves timestamped snapshots."""

    def __init__(self, video_source=0):
        self.appName = "Filtiristic v1.0"
        self.window = Tk()
        self.window.title(self.appName)
        self.window.resizable(0, 0)
        # self.window.wm_iconbitmap("cam.ico")
        self.window["bg"] = "black"
        self.video_source = video_source

        self.vid = videoCapture(self.video_source)
        # BUG FIX: `.pack()` returns None, so the original stored None in
        # self.label; keep the widget reference, then pack it.
        self.label = Label(self.window, text=self.appName, font=15,
                           bg="blue", fg="white")
        self.label.pack(side=TOP, fill=BOTH)

        self.canvas = Canvas(self.window, width=self.vid.width,
                             height=self.vid.height, bg="red")
        self.canvas.pack()

        self.btn_snapshot = Button(self.window, text="capture", width=15,
                                   bg="white", activebackground="red",
                                   command=self.snapshot)
        self.btn_snapshot.pack(anchor=CENTER, expand=True)

        # Start the polling loop, then hand control to Tk.
        self.update()
        self.window.mainloop()

    def snapshot(self):
        """Save the current frame as IMG-<timestamp>.jpg and confirm on screen."""
        check, frame = self.vid.getFrame()
        if check:
            image = "IMG-" + time.strftime("%H-%M-%S-%d-%m") + ".jpg"
            # getFrame() yields RGB; swap channels back so imwrite (which
            # expects BGR) stores correct colors.
            cv2.imwrite(image, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            Label(self.window, text="image saved " + image,
                  bg="white", fg="magenta").place(x=460, y=510)

    def update(self):
        """Poll the camera and redraw the canvas roughly every 15 ms."""
        isTrue, frame = self.vid.getFrame()
        if isTrue:
            # Keep the PhotoImage referenced on self so Tk's garbage
            # collection does not blank the canvas.
            self.photo = ImageTk.PhotoImage(image=Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=NW)
        self.window.after(15, self.update)


########### Class for Capture Video ##############
class videoCapture:
    """Thin wrapper around cv2.VideoCapture that hands out RGB frames."""

    def __init__(self, video_source=0):
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("error video")

        # cv2 reports these as floats; keep ints for widget sizing.
        self.width = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))

    def getFrame(self):
        """Return a ``(ok, rgb_frame_or_None)`` pair.

        BUG FIX: the original returned bare ``None`` when the device was
        closed, which made callers' two-value unpacking raise TypeError;
        now every path returns a 2-tuple.
        """
        if self.vid.isOpened():
            isTrue, frame = self.vid.read()
            if isTrue:
                return (isTrue, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            return (isTrue, None)
        return (False, None)

    def __del__(self):
        # Release the camera handle when the wrapper is collected.
        if self.vid.isOpened():
            self.vid.release()


if __name__ == "__main__":
    App()
# Cat-ears / cat-nose face filter (filters/cat-filter.py).
import cv2
import numpy as np
import dlib
from math import hypot

filter_image = cv2.imread("assest/cat-ears.png")
filter_image3 = cv2.imread("assest/cat-nose.png")

# Face detector and 68-point landmark predictor.
detector = dlib.get_frontal_face_detector()
# FIX: forward slash — "\s" in the original non-raw string is an invalid
# escape sequence (DeprecationWarning, and broken on POSIX paths anyway).
predictor = dlib.shape_predictor("assest/shape_predictor_68_face_landmarks.dat")


def filteringmouse(cap, rows, cols):
    """Grab one frame from `cap` and draw the cat ears + nose on each face."""
    filter1 = np.zeros((rows, cols), np.uint8)
    _, frame = cap.read()
    filter1.fill(0)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(frame)

    try:
        # Ears anchored at landmark 27 (nose bridge), shifted 120 px upward;
        # nose sprite anchored at landmark 28.
        filter(frame, gray_frame, faces, filter_image, 27, 27, 1.7, 0.5, 120)
        filter(frame, gray_frame, faces, filter_image3, 28, 28, 1.1, 1, 0)
    except Exception:
        # Best-effort fallback: if overlaying fails (e.g. the sprite box runs
        # past the frame edge) just show a raw frame. FIX: narrowed from a
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        _, frame_f = cap.read()
        cv2.imshow("Frame", frame_f)


# NOTE(review): this shadows the builtin `filter`; the name is kept so
# existing callers keep working, but renaming (e.g. overlay_sprite) is
# advisable in a follow-up.
def filter(frame, gray_frame, faces, filter_image1, X, Y, width, height,
           above=0, left=0):
    """Overlay `filter_image1` on every detected face and show the frame.

    The sprite is centred on landmark (X, Y) shifted by (`left`, `above`) and
    scaled relative to the jaw width (landmarks 4..14) by `width`/`height`.
    """
    for face in faces:
        landmarks = predictor(gray_frame, face)

        # Sprite anchor and the two jaw landmarks used for scaling.
        center_filter = (landmarks.part(X).x - left, landmarks.part(Y).y - above)
        left_filter = (landmarks.part(4).x, landmarks.part(4).y)
        right_filter = (landmarks.part(14).x, landmarks.part(14).y)

        filter_width = int(hypot(left_filter[0] - right_filter[0],
                                 left_filter[1] - right_filter[1]) * width)
        filter_height = int(filter_width * height)

        # Top-left corner of the sprite's destination box.  (FIX: the unused
        # `bottom_right` computation was removed.)
        top_left = (int(center_filter[0] - filter_width / 2),
                    int(center_filter[1] - filter_height / 2))

        # Mask out the sprite's near-black background, then blend it into the
        # region of interest.
        filtery = cv2.resize(filter_image1, (filter_width, filter_height))
        filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY)
        _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV)

        filter_area = frame[top_left[1]: top_left[1] + filter_height,
                            top_left[0]: top_left[0] + filter_width]
        filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area,
                                                mask=filter1)
        final_filter = cv2.add(filter_area_no_filter, filtery)

        frame[top_left[1]: top_left[1] + filter_height,
              top_left[0]: top_left[0] + filter_width, :] = final_filter

    cv2.imshow("Frame", frame)