diff --git a/.idea/Filteristic.iml b/.idea/Filteristic.iml
index d9e6024..8388dbc 100644
--- a/.idea/Filteristic.iml
+++ b/.idea/Filteristic.iml
@@ -1,8 +1,8 @@
-
-
-
-
-
-
-
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
index 8d93904..d56657a 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,4 @@
-
-
-
+
+
+
\ No newline at end of file
diff --git a/README.md b/README.md
index 66e5c0a..976d4c9 100644
--- a/README.md
+++ b/README.md
@@ -51,4 +51,4 @@ Featured task: different filter names must appear to the user

Domain Modeling

-[white board](https://miro.com/app/board/o9J_lgjUC2c=/)
+[white board ](https://miro.com/app/board/o9J_lgjUC2c=/)
diff --git a/Tkinter/123.png b/Tkinter/123.png
new file mode 100644
index 0000000..80fb972
Binary files /dev/null and b/Tkinter/123.png differ
diff --git a/Tkinter/261531137_719791896078103_2600042766894789699_n.jpg b/Tkinter/261531137_719791896078103_2600042766894789699_n.jpg
new file mode 100644
index 0000000..7ae2535
Binary files /dev/null and b/Tkinter/261531137_719791896078103_2600042766894789699_n.jpg differ
diff --git a/Tkinter/Cat03.jpg b/Tkinter/Cat03.jpg
new file mode 100644
index 0000000..a588450
Binary files /dev/null and b/Tkinter/Cat03.jpg differ
diff --git a/Tkinter/GUI.py b/Tkinter/GUI.py
new file mode 100644
index 0000000..856c968
--- /dev/null
+++ b/Tkinter/GUI.py
@@ -0,0 +1,109 @@
+import tkinter as tk
+from tkinter import filedialog, Text
+import os, sys, subprocess
+# from tkinter import *
+from tkinter.ttk import *
+from tkinter import *
+from PIL import Image, ImageTk
+import time
+import cv2
+from Tkinter.GUI_Live import camera
+from tkinter_custom_button import TkinterCustomButton
+from GUI_background import background_window
+
+# from snapshot import App, videoCapture
+root = tk.Tk()
+images = []
+from Tkinter.GUI_Image import importWindowyahia
+
+
+######### filter image Window ##########
+
+def importFilterWindow(new):
+    importWindowyahia(new)
+
+######### background image Window ##########
+
+def backgroundWindow(new):
+    background_window(new)
+
+######### import Window ##########
+
+def importWindow():
+    importWindow = Toplevel(root)
+    root.withdraw()
+    importWindow.title("Import Window")
+    importWindow.geometry("700x400")
+    Label(importWindow, text="", font=("Arial", 50)).pack()
+    Label(importWindow,text="Editing Type", font=("Arial", 25)).pack()
+    Label(importWindow, text="", font=("Arial", 25)).pack()
+
+    filterButton = TkinterCustomButton(master =importWindow, text="Add Filter", corner_radius=5, command= lambda: importFilterWindow(importWindow),
+                                       fg_color="#FF5C58", hover_color="#ff544f", width=300,
+                                       cursor="shuttle", text_font=("sans-serif", 20))
+    filterButton.pack()
+
+    Label(importWindow, text="", font=("Arial", 25)).pack()
+
+    backgroundButton = TkinterCustomButton(master=importWindow, text="Add Background", corner_radius=5, command=lambda: backgroundWindow(importWindow),
+                                           fg_color="#FF5C58", hover_color="#ff544f", width=300,
+                                           cursor="shuttle", text_font=("sans-serif", 20))
+
+    backgroundButton.pack()
+
+    def on_closing():
+        importWindow.destroy()
+        root.deiconify()
+
+    importWindow.protocol("WM_DELETE_WINDOW", on_closing)
+
+    image = PhotoImage(file='../assest/back.png')
+    backButton = TkinterCustomButton(master=importWindow, corner_radius=15,
+                                     command=on_closing, fg_color="#f1f1f1", hover_color="#c1c1c1", cursor="shuttle",
+                                     image=image, width=50)
+
+    backButton.place(x=0, y=3)
+
+########### second-window #############
+
+def videoWindow():
+    cameraWindow = Toplevel(root)
+    root.withdraw()
+    cameraWindow.title("New Window")
+    # cameraWindow.geometry("700x700")
+    camera(cameraWindow)
+
+
+    ######## Back to previous window ########
+    def on_closing():
+        cameraWindow.destroy()
+        root.deiconify()
+
+    image= PhotoImage(file ='../assest/back.png')
+    backButton = TkinterCustomButton(master=cameraWindow, corner_radius=15,
+                                     command=on_closing, fg_color="#f1f1f1", hover_color="#c1c1c1", cursor="shuttle", image=image,
+                                     width=50)
+    backButton.place(x=0,y=3)
+
+
+    cameraWindow.protocol("WM_DELETE_WINDOW", on_closing)
+
+
+
+##########
Home-Page ########## +root.geometry("960x630") +image= PhotoImage(file ='../assest/2.png',width=960,height=540) +Label(root, image= image).grid(column=0,row=0,columnspan=2) +Label().grid(column=0,row=1) +importButton =TkinterCustomButton(text="Image", corner_radius=5, command=importWindow,fg_color="#FF5C58",hover_color="#ff544f",width=300, + cursor="shuttle",text_font=("sans-serif", 20)) +importButton.grid(row=2,column=0) +Label().grid(column=1,row=1) +cameraButton= TkinterCustomButton(text="Camera", corner_radius=5, command=videoWindow,fg_color="#FF5C58",hover_color="#ff544f",width=300, + cursor="shuttle",text_font=("sans-serif", 20)) +# cameraButton.place(relx=0.5, rely=0.5, anchor=CENTER) +cameraButton.grid(row=2,column=1) + + +########################################### +root.mainloop() \ No newline at end of file diff --git a/Tkinter/GUI_Image.py b/Tkinter/GUI_Image.py new file mode 100644 index 0000000..1858dea --- /dev/null +++ b/Tkinter/GUI_Image.py @@ -0,0 +1,140 @@ +import tkinter as tk +from tkinter import filedialog, Text +import os, sys, subprocess +from filters_image.change_filter import change_filter +from tkinter.ttk import * +from tkinter import * +from PIL import Image, ImageTk +from filters_image.image_filtering_face import image_filtering_face +import cv2 +from tkinter_custom_button import TkinterCustomButton +# root = tk.Tk() +images = [] +count = 0 +next = False +entry = "" +######### import Window ########## + + + +# mainloop() + + + +def importWindowyahia(root): + newWindow = Toplevel(root) + def browse(): + global count + + + filename = filedialog.askopenfilename(title="select File", + filetypes = (("all files","*.*"),("jpeg files","*.jpg"),('png files', '*.png'))) + images.append(filename) + img = Image.open(images[-1]) + img = img.resize((500, 500)) + img = ImageTk.PhotoImage(img) + img_label = Label(newWindow, image=img) + img_label.photo = img + img_label.place(x=228, y=40) + + filtering = TkinterCustomButton(master=newWindow,text="Add Filter", corner_radius=5,command=lambda: image_filter(filename, newWindow), + fg_color="#FF5C58",hover_color="#ff544f", width=200,cursor="shuttle", text_font=("sans-serif", 20)) + filtering.place(x=380, y=550) + + + + + root.withdraw() + newWindow.title("New Window") + newWindow.geometry("960x630") + browse_button = TkinterCustomButton(master=newWindow,text="Browse", corner_radius=5, command=browse, + fg_color="#FF5C58",hover_color="#ff544f", width=200, cursor="shuttle", + text_font=("sans-serif", 20)) + + + browse_button.pack() + + + ######## Back to previous window ######## + def on_closing(): + newWindow.destroy() + root.deiconify() + image= PhotoImage(file ='../assest/back.png') + backButton = TkinterCustomButton(master=newWindow, corner_radius=15, + command=on_closing,fg_color="#f1f1f1",hover_color="#c1c1c1", cursor="shuttle",image=image,width=50) + + backButton.place(x=0,y=3) + + newWindow.protocol("WM_DELETE_WINDOW", on_closing) + + def render(image_withfilter): + image_withfilter = cv2.cvtColor(image_withfilter, cv2.COLOR_BGR2RGB) + img = Image.fromarray(image_withfilter) + img = img.resize((500, 500)) + img = ImageTk.PhotoImage(img) + img_label = Label(newWindow, image=img) + img_label.photo = img + img_label.place(x=228, y=40) + # photo = ImageTk.PhotoImage(image=Image.fromarray(image_withfilter)) + # canvas.create_image(0, 0, anchor="nw", image=photo) + + def next_fun(path): + global count + + images.append(image_filtering_face( + change_filter[count]['filter'], + path, + change_filter[count]['center'], + 
change_filter[count]['width'], + change_filter[count]['height'], + change_filter[count]['up'], + change_filter[count]['left'], + change_filter[count]['counte'] + )) + count += 1 + if count == len(change_filter): + count = 0 + render(images[-1]) + + def submit(content,top): + global entry + entry1 = entry.get() + imgdir = f"../saved/{entry1}.png" + cv2.imwrite(imgdir, content) + entry.delete(0, END) + top.destroy() + def saving(content, newWindow): + pass + def image_filter(path, newWindow): + global count + images.append( + image_filtering_face( + change_filter[count]['filter'], + path, + change_filter[count]['center'], + change_filter[count]['width'], + change_filter[count]['height'], + change_filter[count]['up'], + change_filter[count]['left'], + change_filter[count]['counte'] + )) + save = TkinterCustomButton(master=newWindow, text="Save", corner_radius=5, + command=lambda: open_popup(images[-1],newWindow), fg_color="#FF5C58",hover_color="#ff544f", width=200, cursor="shuttle", text_font=("sans-serif", 20)) + save.place(x=50, y=550) + next = TkinterCustomButton(master=newWindow, text="Next", corner_radius=5, + command=lambda: next_fun(path), fg_color="#FF5C58",hover_color="#ff544f", width=200, cursor="shuttle", text_font=("sans-serif", 20)) + newWindow.bind("", lambda x: next_fun(path)) + next.place(x=710, y=550) + render(images[-1]) + + def open_popup(content,newWindow): + top = Toplevel(newWindow) + top.geometry("250x150") + top.title("save") + global entry + sub_btn = TkinterCustomButton(master=top, text="Submit", corner_radius=5, command=lambda: submit(content,top), + fg_color="#2da44e",hover_color="#24843f", width=100, + cursor="shuttle", text_font=("sans-serif", 20)) + entry = tk.Entry(top, width=20, bg="white") + entry.place(x=75, y=10) + sub_btn.place(x=75, y=50) diff --git a/Tkinter/GUI_Live.py b/Tkinter/GUI_Live.py new file mode 100644 index 0000000..855c9a4 --- /dev/null +++ b/Tkinter/GUI_Live.py @@ -0,0 +1,145 @@ +import tkinter as tk +from PIL import ImageTk, Image +import cv2 +from filters_live.video_filtering_face import video_filtering_face,change_filter +from tkinter_custom_button import TkinterCustomButton +from tkinter import Tk +from tkinter.ttk import Frame, Label +from background_live.request_image import add_path,check_image + +save = False +count_filter = -1 +count_back = 0 +show_filter_live = False +show_background_live = False + + + + +def camera(newWindow): + app = Frame(newWindow) + app.pack() + lmain = Label(app) + lmain.pack() + + def on_closing(top): + top.destroy() + + + def printInput(): + add_path(inputtxt2.get(1.0, "end-1c")) + + inputtxt2 = tk.Text(newWindow, + height=1, + width=20) + inputtxt2.place(x=400,y=550) + printButton = TkinterCustomButton(master=newWindow,text="Add New Background", corner_radius=5, command=printInput, + fg_color="#FF5C58",hover_color="#ff544f",width=150,cursor="shuttle", text_font=("sans-serif", 10)) + printButton.place(x=408,y=570) + + def nextback(): + global count_back, show_background_live + len_image = check_image() + if count_back == len_image: + show_background_live = False + count_back = 0 + else: + count_back += 1 + show_background_live = True + + def nextWindow(): + global count_filter, show_filter_live + if count_filter == len(change_filter) - 1: + show_filter_live = False + count_filter = -1 + else: + count_filter += 1 + show_filter_live = True + + # def saveWindow(): + + + + importButton = TkinterCustomButton(master=newWindow,text="Next background", corner_radius=5, command=nextback, + 
fg_color="#FF5C58",hover_color="#ff544f", + width=300, + cursor="shuttle", text_font=("sans-serif", 20)) + importButton.place(x=500,y=490) + importButton = TkinterCustomButton(master=newWindow,text="Next filter", corner_radius=5, command=nextWindow, + fg_color="#FF5C58",hover_color="#ff544f", + width=300, cursor="shuttle", text_font=("sans-serif", 20)) + importButton.place(x=160,y=490) + image = tk.PhotoImage(file='../assest/camera.png') + importButton = TkinterCustomButton(master=newWindow, corner_radius=20, + command=lambda:open_popup(newWindow), fg_color="#f1f1f1", hover_color="#c1c1c1", cursor="shuttle", + image=image, + width=50) + importButton.place(x=740,y=431) + + def video_stream3(): + + frame = video_filtering_face( + change_filter[count_filter]['filter'], + change_filter[count_filter]['center'], + change_filter[count_filter]['width'], + change_filter[count_filter]['height'], + change_filter[count_filter]['up'], + change_filter[count_filter]['left'], + f'../assest/background/back{count_back}.png', + 1,show_filter_live,show_background_live + + ) + + cv2image2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) + img2 = Image.fromarray(cv2image2) + imgtk2 = ImageTk.PhotoImage(image=img2) + lmain.imgtk = imgtk2 + lmain.configure(image=imgtk2) + if save: + save_image(frame) + lmain.after(1, video_stream3) + def save_image(frame): + global save + save = False + cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) + img = Image.fromarray(cv2image) + imgtk = ImageTk.PhotoImage(image=img) + lmain.imgtk = imgtk + lmain.configure(image=imgtk) + path_name(frame) + + def print_path2(inputtxt, frame): + global top + name = inputtxt.get(1.0, "end-1c") + img_name = f"../saved/{name}.png" + cv2.imwrite(img_name, frame) + inputtxt.delete('1.0', tk.END) + on_closing(top) + + newWindow.geometry("960x630") + newWindow.bind("", lambda x: nextWindow()) + newWindow.bind("", lambda x: nextback()) + video_stream3() + top ='' + def open_popup(newWindow): + global top + top = tk.Toplevel(newWindow) + top.geometry("250x150") + top.title("save") + global save + save = True + + def path_name(frame): + global top + inputtxt = tk.Text(top, + height=5, + width=20) + + inputtxt.pack() + printButton = TkinterCustomButton(master=top,text="Save Image", corner_radius=5, command=lambda: print_path2(inputtxt, frame), fg_color="#2da44e", + hover_color="#24843f", width=150,cursor="shuttle", text_font=("sans-serif", 20)) + printButton.pack() + +# newWindow.mainloop() + + diff --git a/Tkinter/GUI_background.py b/Tkinter/GUI_background.py new file mode 100644 index 0000000..de7c4af --- /dev/null +++ b/Tkinter/GUI_background.py @@ -0,0 +1,150 @@ +import os +from tkinter import * +from tkinter import ttk, filedialog +import requests +from PIL import Image, ImageTk +from background_image.backgorund_image import backgroundImage +from tkinter_custom_button import TkinterCustomButton +images = [] +filter_type = "" +path_name = "" + +def background_window(window_root): + window = Toplevel(window_root) + window_root.withdraw() + window.title("Import Window") + window.geometry("1000x700") + def get_filter_type(): + + global filter_type, path_name + filter_type = type_choosen.get() + if filter_type == "color": + apply() + elif filter_type == "image": + filename = filedialog.askopenfilename(title="select File", + filetypes=( + ("jpeg files", "*.jpg"), ("all files", "*.*"), + ('png files', '*.png'))) + backgroundImage(filter_type, images[-1], 0, filename) + render("../saved/temp.png") + else: + backgroundImage(filter_type, images[-1], 0, 
'../assest/background/back1.png') + render("../saved/temp.png") + + def on_closing(): + window.destroy() + window_root.deiconify() + + image = PhotoImage(file='../assest/back.png') + backButton = TkinterCustomButton(master=window, corner_radius=15, + command=on_closing, fg_color="#f1f1f1", hover_color="#c1c1c1", cursor="shuttle", + image=image, width=50) + + backButton.place(x=0, y=3) + + window.protocol("WM_DELETE_WINDOW", on_closing) + + def apply(): + def apply_color(): + b = {'Black':(0,0,0),'Green':(0,255,0),'beige':(207,203,169),'Blue':(0,0,255),'Red':(255,0,0)} + backgroundImage("color", images[-1], b[color_choosen.get()], '../assest/background/back1.png') + render("../saved/temp.png") + colorButton = TkinterCustomButton(master= window, text="Add color", corner_radius=5, + command=apply_color,fg_color="#FF5C58",hover_color="#ff544f", width=200, + cursor="shuttle", text_font=("sans-serif", 16)) + colorButton.place(x=510, y=550) + m = StringVar() + color_choosen = ttk.Combobox(window, width=20, textvariable=m) + color_choosen['values'] = ('Black', + 'Green', + 'beige', + 'Blue', + 'Red') + + color_choosen.place(x=720, y=559) + color_choosen.current(2) + + + def add_from_web(): + img_url = entry.get("1.0","end-1c") + r = requests.get(img_url) + with open("../saved/web.png", 'wb') as f: + f.write(r.content) + images.append("../saved/web.png") + render(images[-1]) + common() + + + def render(image_withfilter): + img = Image.open(image_withfilter) + img = img.resize((720, 480)) + img = ImageTk.PhotoImage(img) + img_label = Label(window, image=img) + img_label.photo = img + img_label.place(x=140, y=60) + + def browse(): + filename = filedialog.askopenfilename(title="select File", + filetypes = (("all files","*.*"),("jpeg files","*.jpg"),('png files', '*.png'))) + images.append(filename) + render(images[-1]) + common() + + def saving(top): + os.rename("../saved/temp.png", f"../saved/{entryy.get()}.png") + entryy.delete(0,END) + on_closing(top) + + def on_closing(top): + top.destroy() + window.deiconify() + + importButton = TkinterCustomButton(master=window, text="Browse", corner_radius=5, + command=browse, fg_color="#FF5C58",hover_color="#ff544f", width=200, + cursor="shuttle", text_font=("sans-serif", 20)) + importButton.place(x=140, y= 10) + + web_link_Button = TkinterCustomButton(master=window, text="Download", corner_radius=5, + command=add_from_web,fg_color="#FF5C58",hover_color="#ff544f", width=200, + cursor="shuttle", text_font=("sans-serif", 20)) + web_link_Button.place(x=660, y= 10) + + entry = Text(window, width=27, height=2, bg="white") + entry.place(x=435, y= 13) + entry.insert(END, 'Enter image link from web') + + + n = StringVar() + type_choosen = ttk.Combobox(window, width=20, textvariable=n) + + def open_popup(newWindow): + top = Toplevel(newWindow) + top.geometry("250x150") + top.title("save") + sub_btn = TkinterCustomButton(master=top, text="Submit", corner_radius=5, command=lambda: saving(top), + fg_color="#2da44e",hover_color="#24843f", width=100, + cursor="shuttle", text_font=("sans-serif", 20)) + + global entryy + entryy = Entry(top, width=20, bg="white") + entryy.place(x=65, y=13) + sub_btn.place(x=75, y=50) + + def common(): + typeButton = TkinterCustomButton(master=window, text="Apply Filter", corner_radius=5, + command=get_filter_type, fg_color="#FF5C58",hover_color="#ff544f", width=200, + cursor="shuttle", text_font=("sans-serif", 16)) + typeButton.place(x=140, y=550) + + saveButton = TkinterCustomButton(master=window, text="Save", corner_radius=5, + 
command=lambda: open_popup(window), fg_color="#FF5C58",hover_color="#ff544f", width=200, + cursor="shuttle", text_font=("sans-serif", 20)) + saveButton.place(x=400, y=600) + type_choosen['values'] = ('image', + 'gray', + 'blur', + 'color',) + + type_choosen.place(x=350, y=559) + type_choosen.current(2) + diff --git a/Tkinter/snapshot.py b/Tkinter/snapshot.py new file mode 100644 index 0000000..3b93e2b --- /dev/null +++ b/Tkinter/snapshot.py @@ -0,0 +1,89 @@ +from tkinter import * +import cv2 +from PIL import Image, ImageTk +import time + + + + +# from PyQt5.QtMultimedia import * +from PyQt5.QtCore import QUrl + +class App: + def __init__(self, video_source=0): + self.appName = "Filtiristic v1.0" + self.window = Tk() + self.window.title(self.appName) + self.window.resizable(0, 0) + # self.window.wm_iconbitmap("cam.ico") + self.window["bg"] = "black" + self.video_source = video_source + + self.vid = videoCapture(self.video_source) + self.label = Label(self.window, text=self.appName, font=15, bg="blue", fg="white").pack(side=TOP, fill=BOTH) + + self.canvas = Canvas(self.window, width=self.vid.width, height=self.vid.height, bg="red") + self.canvas.pack() + + self.btn_snapshot = Button(self.window, text="capture", width=15, bg="white", activebackground="red", + command=self.snapshot) + self.btn_snapshot.pack(anchor=CENTER, expand=True) + + + self.update() + self.window.mainloop() + + def snapshot(self): + check, frame = self.vid.getFrame() + if check: + image = "IMG-" + time.strftime("%H-%M-%S-%d-%m") + ".jpg" + cv2.imwrite(image, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) + + msg = Label(self.window, text="image saved "+image,bg="white", fg="magenta").place(x=460, y=510) + + # file = QUrl("click.wav") + # content = QMediaContent(file) + # self.player = QMediaContent(file) + # self.player = QMediaPlayer() + # self.player.setMedia(content) + # self.player.play() + + def update(self): + isTrue, frame = self.vid.getFrame() + + if isTrue: + self.photo = ImageTk.PhotoImage(image=Image.fromarray(frame)) + self.canvas.create_image(0, 0, image=self.photo, anchor=NW) + + self.window.after(15, self.update) + + +########### Class for Capture Video##############3 +class videoCapture: + def __init__(self, video_source=0): + self.vid = cv2.VideoCapture(video_source) + if not self.vid.isOpened(): + raise ValueError("error video") + + self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH) + self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT) + + def getFrame(self): + if self.vid.isOpened(): + isTrue, frame = self.vid.read() + if isTrue: + return (isTrue, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) + else: + return (isTrue, None) + else: + return None + + def __del__(self): + if self.vid.isOpened(): + self.vid.release() + + + + +if __name__ == "__main__": + App() diff --git a/Tkinter/test2.py b/Tkinter/test2.py new file mode 100644 index 0000000..9c88c9c --- /dev/null +++ b/Tkinter/test2.py @@ -0,0 +1,59 @@ +import tkinter as tk +from tkinter import filedialog, Text +import os, sys, subprocess +# from tkinter import * +from tkinter.ttk import * +from tkinter import * +from PIL import Image, ImageTk +import time +import cv2 +from Tkinter.GUI_Live import camera +# from snapshot import App, videoCapture +root = tk.Tk() +images = [] +from GUI_Image import importWindowyahia + + +######### import Window ########## + +def importWindow(): + importWindowyahia(root) + +########### second-window ############# + +def videoWindow(): + cameraWindow = Toplevel(root) + root.withdraw() + cameraWindow.title("New Window") + 
cameraWindow.geometry("700x700") + Label(cameraWindow, + text="This is a new window").grid() + + camera(cameraWindow) + + + ######## Back to previous window ######## + def on_closing(): + cameraWindow.destroy() + root.deiconify() + backButton = tk.Button(cameraWindow, text="Previous Page", padx=90, pady=10, fg="white", bg="#2596be", font=('arial', 15), command=on_closing) + backButton.grid() + + cameraWindow.protocol("WM_DELETE_WINDOW", on_closing) + + + +########## Home-Page ########## +image= PhotoImage(file ='../assest/image.png',width=700,height=700) +Label(root, image= image, bg="black",).grid(column=0,row=0,columnspan=2) + +importButton = tk.Button(text="Import", padx=90, pady=10, fg="white", bg="#2596be", font=('arial',15), command=importWindow) +importButton.grid(row=1,column=0) + + +cameraButton= tk.Button(text="Camera", padx=90, pady=10, fg="white", bg="#2596be",font=('arial',15), command=videoWindow) +cameraButton.grid(row=1,column=1) + + +########################################### +root.mainloop() \ No newline at end of file diff --git a/Tkinter/tkinter_custom_button.py b/Tkinter/tkinter_custom_button.py new file mode 100644 index 0000000..259bfe8 --- /dev/null +++ b/Tkinter/tkinter_custom_button.py @@ -0,0 +1,276 @@ +import tkinter +import sys + + +class TkinterCustomButton(tkinter.Frame): + """ tkinter custom button with border, rounded corners and hover effect + + Arguments: master= where to place button + bg_color= background color, None is standard, + fg_color= foreground color, blue is standard, + hover_color= foreground color, lightblue is standard, + border_color= foreground color, None is standard, + border_width= border thickness, 0 is standard, + command= callback function, None is standard, + width= width of button, 110 is standard, + height= width of button, 35 is standard, + corner_radius= corner radius, 10 is standard, + text_font= (, ), + text_color= text color, white is standard, + text= text of button, + hover= hover effect, True is standard, + image= PIL.PhotoImage, standard is None""" + + def __init__(self, + bg_color=None, + fg_color="#2874A6", + hover_color="#5499C7", + border_color=None, + border_width=0, + command=None, + width=120, + height=40, + corner_radius=10, + text_font=None, + text_color="white", + text="CustomButton", + hover=True, + image=None, + *args, **kwargs): + super().__init__(*args, **kwargs) + + if bg_color is None: + self.bg_color = self.master.cget("bg") + else: + self.bg_color = bg_color + + self.fg_color = fg_color + self.hover_color = hover_color + self.border_color = border_color + + self.width = width + self.height = height + + if corner_radius*2 > self.height: + self.corner_radius = self.height/2 + elif corner_radius*2 > self.width: + self.corner_radius = self.width/2 + else: + self.corner_radius = corner_radius + + self.border_width = border_width + + if self.corner_radius >= self.border_width: + self.inner_corner_radius = self.corner_radius - self.border_width + else: + self.inner_corner_radius = 0 + + self.text = text + self.text_color = text_color + if text_font is None: + if sys.platform == "darwin": # macOS + self.text_font = ("Avenir", 13) + elif "win" in sys.platform: # Windows + self.text_font = ("Century Gothic", 11) + else: + self.text_font = ("TkDefaultFont") + else: + self.text_font = text_font + + self.image = image + + self.function = command + self.hover = hover + + self.configure(width=self.width, height=self.height) + + if sys.platform == "darwin" and self.function is not None: + 
self.configure(cursor="pointinghand") + + self.canvas = tkinter.Canvas(master=self, + highlightthicknes=0, + background=self.bg_color, + width=self.width, + height=self.height) + self.canvas.place(x=0, y=0) + + if self.hover is True: + self.canvas.bind("", self.on_enter) + self.canvas.bind("", self.on_leave) + + self.canvas.bind("", self.clicked) + self.canvas.bind("", self.clicked) + + self.canvas_fg_parts = [] + self.canvas_border_parts = [] + self.text_part = None + self.text_label = None + self.image_label = None + + self.draw() + + def draw(self): + self.canvas.delete("all") + self.canvas_fg_parts = [] + self.canvas_border_parts = [] + self.canvas.configure(bg=self.bg_color) + + # border button parts + if self.border_width > 0: + + if self.corner_radius > 0: + self.canvas_border_parts.append(self.canvas.create_oval(0, + 0, + self.corner_radius * 2, + self.corner_radius * 2)) + self.canvas_border_parts.append(self.canvas.create_oval(self.width - self.corner_radius * 2, + 0, + self.width, + self.corner_radius * 2)) + self.canvas_border_parts.append(self.canvas.create_oval(0, + self.height - self.corner_radius * 2, + self.corner_radius * 2, + self.height)) + self.canvas_border_parts.append(self.canvas.create_oval(self.width - self.corner_radius * 2, + self.height - self.corner_radius * 2, + self.width, + self.height)) + + self.canvas_border_parts.append(self.canvas.create_rectangle(0, + self.corner_radius, + self.width, + self.height - self.corner_radius)) + self.canvas_border_parts.append(self.canvas.create_rectangle(self.corner_radius, + 0, + self.width - self.corner_radius, + self.height)) + + # inner button parts + + if self.corner_radius > 0: + self.canvas_fg_parts.append(self.canvas.create_oval(self.border_width, + self.border_width, + self.border_width + self.inner_corner_radius * 2, + self.border_width + self.inner_corner_radius * 2)) + self.canvas_fg_parts.append(self.canvas.create_oval(self.width - self.border_width - self.inner_corner_radius * 2, + self.border_width, + self.width - self.border_width, + self.border_width + self.inner_corner_radius * 2)) + self.canvas_fg_parts.append(self.canvas.create_oval(self.border_width, + self.height - self.border_width - self.inner_corner_radius * 2, + self.border_width + self.inner_corner_radius * 2, + self.height-self.border_width)) + self.canvas_fg_parts.append(self.canvas.create_oval(self.width - self.border_width - self.inner_corner_radius * 2, + self.height - self.border_width - self.inner_corner_radius * 2, + self.width - self.border_width, + self.height - self.border_width)) + + self.canvas_fg_parts.append(self.canvas.create_rectangle(self.border_width + self.inner_corner_radius, + self.border_width, + self.width - self.border_width - self.inner_corner_radius, + self.height - self.border_width)) + self.canvas_fg_parts.append(self.canvas.create_rectangle(self.border_width, + self.border_width + self.inner_corner_radius, + self.width - self.border_width, + self.height - self.inner_corner_radius - self.border_width)) + + for part in self.canvas_fg_parts: + self.canvas.itemconfig(part, fill=self.fg_color, width=0) + + for part in self.canvas_border_parts: + self.canvas.itemconfig(part, fill=self.border_color, width=0) + + # no image given + if self.image is None: + # create tkinter.Label with text + self.text_label = tkinter.Label(master=self, + text=self.text, + font=self.text_font, + bg=self.fg_color, + fg=self.text_color) + self.text_label.place(relx=0.5, rely=0.5, anchor=tkinter.CENTER) + + # bind events the the button click and 
hover events also to the text_label + if self.hover is True: + self.text_label.bind("", self.on_enter) + self.text_label.bind("", self.on_leave) + + self.text_label.bind("", self.clicked) + self.text_label.bind("", self.clicked) + + self.set_text(self.text) + + # use the given image + else: + # create tkinter.Label with image on it + self.image_label = tkinter.Label(master=self, + image=self.image, + bg=self.fg_color) + + self.image_label.place(relx=0.5, + rely=0.5, + anchor=tkinter.CENTER) + + # bind events the the button click and hover events also to the image_label + if self.hover is True: + self.image_label.bind("", self.on_enter) + self.image_label.bind("", self.on_leave) + + self.image_label.bind("", self.clicked) + self.image_label.bind("", self.clicked) + + def configure_color(self, bg_color=None, fg_color=None, hover_color=None, text_color=None): + if bg_color is not None: + self.bg_color = bg_color + else: + self.bg_color = self.master.cget("bg") + + if fg_color is not None: + self.fg_color = fg_color + + # change background color of image_label + if self.image is not None: + self.image_label.configure(bg=self.fg_color) + + if hover_color is not None: + self.hover_color = hover_color + + if text_color is not None: + self.text_color = text_color + if self.text_part is not None: + self.canvas.itemconfig(self.text_part, fill=self.text_color) + + self.draw() + + def set_text(self, text): + if self.text_label is not None: + self.text_label.configure(text=text) + + def on_enter(self, event=0): + for part in self.canvas_fg_parts: + self.canvas.itemconfig(part, fill=self.hover_color, width=0) + + if self.text_label is not None: + # change background color of image_label + self.text_label.configure(bg=self.hover_color) + + if self.image_label is not None: + # change background color of image_label + self.image_label.configure(bg=self.hover_color) + + def on_leave(self, event=0): + for part in self.canvas_fg_parts: + self.canvas.itemconfig(part, fill=self.fg_color, width=0) + + if self.text_label is not None: + # change background color of image_label + self.text_label.configure(bg=self.fg_color) + + if self.image_label is not None: + # change background color of image_label + self.image_label.configure(bg=self.fg_color) + + def clicked(self, event=0): + if self.function is not None: + self.function() + self.on_leave() \ No newline at end of file diff --git a/assest/10-2-moustache-free-png-image.png b/assest/10-2-moustache-free-png-image.png new file mode 100644 index 0000000..c26900e Binary files /dev/null and b/assest/10-2-moustache-free-png-image.png differ diff --git a/assest/2.png b/assest/2.png new file mode 100644 index 0000000..a2b0b1a Binary files /dev/null and b/assest/2.png differ diff --git a/assest/back.png b/assest/back.png new file mode 100644 index 0000000..e2e70b5 Binary files /dev/null and b/assest/back.png differ diff --git a/assest/background/back1.png b/assest/background/back1.png new file mode 100644 index 0000000..d9f9437 Binary files /dev/null and b/assest/background/back1.png differ diff --git a/assest/background/back2.png b/assest/background/back2.png new file mode 100644 index 0000000..1c44a35 Binary files /dev/null and b/assest/background/back2.png differ diff --git a/assest/background/back3.png b/assest/background/back3.png new file mode 100644 index 0000000..215788b Binary files /dev/null and b/assest/background/back3.png differ diff --git a/assest/background/back4.png b/assest/background/back4.png new file mode 100644 index 0000000..c77959c Binary files 
/dev/null and b/assest/background/back4.png differ diff --git a/assest/background/back5.png b/assest/background/back5.png new file mode 100644 index 0000000..7d0c677 Binary files /dev/null and b/assest/background/back5.png differ diff --git a/assest/background/back6.png b/assest/background/back6.png new file mode 100644 index 0000000..7d0c677 Binary files /dev/null and b/assest/background/back6.png differ diff --git a/assest/beard.png b/assest/beard.png new file mode 100644 index 0000000..ece3102 Binary files /dev/null and b/assest/beard.png differ diff --git a/assest/birthday-hat.png b/assest/birthday-hat.png new file mode 100644 index 0000000..3fc8422 Binary files /dev/null and b/assest/birthday-hat.png differ diff --git a/assest/camera.png b/assest/camera.png new file mode 100644 index 0000000..bdbf825 Binary files /dev/null and b/assest/camera.png differ diff --git a/assest/cat-ears.png b/assest/cat-ears.png new file mode 100644 index 0000000..620308d Binary files /dev/null and b/assest/cat-ears.png differ diff --git a/assest/cat-nose.png b/assest/cat-nose.png new file mode 100644 index 0000000..576573b Binary files /dev/null and b/assest/cat-nose.png differ diff --git a/assest/eye1.png b/assest/eye1.png new file mode 100644 index 0000000..c6fb1a0 Binary files /dev/null and b/assest/eye1.png differ diff --git a/assest/eye2.png b/assest/eye2.png new file mode 100644 index 0000000..e53e72f Binary files /dev/null and b/assest/eye2.png differ diff --git a/assest/f01.png b/assest/f01.png new file mode 100644 index 0000000..83a563d Binary files /dev/null and b/assest/f01.png differ diff --git a/assest/f02.png b/assest/f02.png new file mode 100644 index 0000000..f416f3c Binary files /dev/null and b/assest/f02.png differ diff --git a/assest/f03.png b/assest/f03.png new file mode 100644 index 0000000..0054886 Binary files /dev/null and b/assest/f03.png differ diff --git a/assest/f04.png b/assest/f04.png new file mode 100644 index 0000000..4521da4 Binary files /dev/null and b/assest/f04.png differ diff --git a/assest/f05.png b/assest/f05.png new file mode 100644 index 0000000..700be4d Binary files /dev/null and b/assest/f05.png differ diff --git a/assest/f21.png b/assest/f21.png new file mode 100644 index 0000000..db75b5a Binary files /dev/null and b/assest/f21.png differ diff --git a/assest/f22.png b/assest/f22.png new file mode 100644 index 0000000..6c24f0e Binary files /dev/null and b/assest/f22.png differ diff --git a/assest/f31.png b/assest/f31.png new file mode 100644 index 0000000..92a779c Binary files /dev/null and b/assest/f31.png differ diff --git a/assest/f32.png b/assest/f32.png new file mode 100644 index 0000000..09d149c Binary files /dev/null and b/assest/f32.png differ diff --git a/assest/f33.png b/assest/f33.png new file mode 100644 index 0000000..5966d6b Binary files /dev/null and b/assest/f33.png differ diff --git a/assest/f34.png b/assest/f34.png new file mode 100644 index 0000000..ed00613 Binary files /dev/null and b/assest/f34.png differ diff --git a/assest/face.jpg b/assest/face.jpg new file mode 100644 index 0000000..cb38dea Binary files /dev/null and b/assest/face.jpg differ diff --git a/assest/flower-crown-png-42606.png b/assest/flower-crown-png-42606.png new file mode 100644 index 0000000..d2b454b Binary files /dev/null and b/assest/flower-crown-png-42606.png differ diff --git a/assest/flower-crown.png b/assest/flower-crown.png new file mode 100644 index 0000000..d2b454b Binary files /dev/null and b/assest/flower-crown.png differ diff --git a/assest/funny-sunglass.png 
b/assest/funny-sunglass.png new file mode 100644 index 0000000..6d673c7 Binary files /dev/null and b/assest/funny-sunglass.png differ diff --git a/assest/ghoul2.png b/assest/ghoul2.png new file mode 100644 index 0000000..8b4ca39 Binary files /dev/null and b/assest/ghoul2.png differ diff --git a/assest/gold-crown.png b/assest/gold-crown.png new file mode 100644 index 0000000..9838f0e Binary files /dev/null and b/assest/gold-crown.png differ diff --git a/assest/hair9.png b/assest/hair9.png new file mode 100644 index 0000000..b835895 Binary files /dev/null and b/assest/hair9.png differ diff --git a/assest/moustache2.png b/assest/moustache2.png new file mode 100644 index 0000000..c26900e Binary files /dev/null and b/assest/moustache2.png differ diff --git a/assest/mustach.png b/assest/mustach.png new file mode 100644 index 0000000..95dff4e Binary files /dev/null and b/assest/mustach.png differ diff --git a/assest/queens-crown.png b/assest/queens-crown.png new file mode 100644 index 0000000..dfcf506 Binary files /dev/null and b/assest/queens-crown.png differ diff --git a/assest/sunglasses.png b/assest/sunglasses.png new file mode 100644 index 0000000..cd50c32 Binary files /dev/null and b/assest/sunglasses.png differ diff --git a/assest/sunglasses2.png b/assest/sunglasses2.png new file mode 100644 index 0000000..cd50c32 Binary files /dev/null and b/assest/sunglasses2.png differ diff --git a/assest/tongue.png b/assest/tongue.png new file mode 100644 index 0000000..93636b8 Binary files /dev/null and b/assest/tongue.png differ diff --git a/assest/yas5.png b/assest/yas5.png new file mode 100644 index 0000000..b7fed9d Binary files /dev/null and b/assest/yas5.png differ diff --git a/assest/yas6.png b/assest/yas6.png new file mode 100644 index 0000000..487d052 Binary files /dev/null and b/assest/yas6.png differ diff --git a/assest/yas8.png b/assest/yas8.png new file mode 100644 index 0000000..f7ff4f4 Binary files /dev/null and b/assest/yas8.png differ diff --git a/background_image/Output_image/test4.png b/background_image/Output_image/test4.png new file mode 100644 index 0000000..8625337 Binary files /dev/null and b/background_image/Output_image/test4.png differ diff --git a/background_image/Output_image/yahia2.png b/background_image/Output_image/yahia2.png new file mode 100644 index 0000000..f465ae0 Binary files /dev/null and b/background_image/Output_image/yahia2.png differ diff --git a/background_image/backgorund_image.py b/background_image/backgorund_image.py new file mode 100644 index 0000000..6650206 --- /dev/null +++ b/background_image/backgorund_image.py @@ -0,0 +1,23 @@ +import pixellib +from background_image.check_directory import saveimage, check_image +from pixellib.tune_bg import alter_bg + + +def backgroundImage(type, input_image, color, back_ground): + path_name = "../saved/temp.png" + if path_name: + change_bg = alter_bg(model_type = "pb") + change_bg.load_pascalvoc_model("../background_image/xception_pascalvoc.pb") + + if type == 'image': + change_bg.change_bg_img(f_image_path = input_image, b_image_path = back_ground, output_image_name=path_name,detect='person') + if type == 'gray': + change_bg.gray_bg(input_image, output_image_name=path_name) + if type =='color': + change_bg.color_bg(input_image, colors=color, output_image_name=path_name) + if type == 'blur': + change_bg.blur_bg(input_image, low=True, output_image_name=path_name) + # saveimage(path_name) + + +# backgroundImage('gray','Output_image/test2.png','Input_image/lena.jpg') diff --git a/background_image/check_directory.py 
b/background_image/check_directory.py new file mode 100644 index 0000000..c8c6a1f --- /dev/null +++ b/background_image/check_directory.py @@ -0,0 +1,56 @@ +from os.path import exists +import os +import cv2 +import requests +def check_image(): + path = input('enter the name of path output : ') + path_image = f'../saved/{path}.png' + file_exists = exists(path_image) + while file_exists: + path_name2 = input('this name is exist try another one or enter q : ') + if path_name2 == 'q': + path_image = False + break + + path_image = f'../saved/{path_name2}.png' + file_exists = exists(path_image) + return path_image + +def saveimage(path): + os.remove("../saved/web.png") + image = cv2.imread(path) + cv2.imshow('image window', image) + cv2.waitKey(0) + cv2.destroyAllWindows() + save = input('do you want to save image ?') + if not save == 'yes': + os.remove(path) + return +def add_path(image_path,origin): + + path_image = "../saved/web.png" + + if ':' in image_path and "\\" in image_path and 'http' not in image_path: + img = cv2.imread(image_path) + cv2.imwrite(path_image, img) + + else: + r= requests.get(image_path) + with open(path_image,'wb') as f : + f.write(r.content) + + return path_image + +def file_exist(origin): + path_name = input('add path name : ') + path_image = f'{origin}{path_name}.png' + file_exists = exists(path_image) + while file_exists: + path_name2 = input('this name is exist try another one or enter q : ') + if path_name2 == 'q': + path_image = '' + break + + path_image = f'{origin}{path_name2}.png' + file_exists = exists(path_image) + return path_image \ No newline at end of file diff --git a/background_image/filter_background_image.py b/background_image/filter_background_image.py new file mode 100644 index 0000000..6072edb --- /dev/null +++ b/background_image/filter_background_image.py @@ -0,0 +1,25 @@ +from backgorund_image import backgroundImage +from check_directory import add_path +def filter_background_image(): + type_filter = int(input('type_filter : ')) + background_filter ='' + color = '' + type_of_background = input('enter type of background : [gray,image,blur,color] : ') + + path_input_image=add_path(input('image path : '),'Input_image/') + + if type_filter ==0 and type_of_background == 'image' : + background_filter = add_path(input('path image : '), 'input_background/') + backgroundImage(type_of_background, path_input_image,color, background_filter) + + + if type_of_background=='color': + add_color = input('input color rgb : 0,0,255 : ') + color = tuple(map(int, add_color.split(','))) + + if type_filter == 1 : + backgroundImage(type_of_background,path_input_image,color,'../assest/background/back1.png') + if type_filter == 2 : + backgroundImage(type_of_background,path_input_image,background_filter,color,'../assest/background/back2.png') + +filter_background_image() \ No newline at end of file diff --git a/background_image/input_background/test4.png b/background_image/input_background/test4.png new file mode 100644 index 0000000..9e08db8 Binary files /dev/null and b/background_image/input_background/test4.png differ diff --git a/background_live/background_live.py b/background_live/background_live.py new file mode 100644 index 0000000..6af1e0e --- /dev/null +++ b/background_live/background_live.py @@ -0,0 +1,63 @@ +import cv2 +import mediapipe as mp +import numpy as np + +mp_selfie_segmentation = mp.solutions.selfie_segmentation +back1 = 1 +cam = cv2.VideoCapture(0) +# cam.set(3, 1280) +# cam.set(4, 720) +fsize = (520, 720) +def background(path,blur =1,img=2): + + + + 
+ + + + scene = cv2.imread(path) # read the scene image + # scene = cv2.blur(scene, (blur, blur)) + scene = cv2.resize(scene, (fsize[1], fsize[0])) # resize scene to the size of frame + with mp_selfie_segmentation.SelfieSegmentation(model_selection=1) as selfie_seg: + bg_image = scene + ret, frame = cam.read() + if not ret: + print("Error reading frame...") + + frame = cv2.resize(frame, (fsize[1], fsize[0])) + + # flip it to look like selfie camera + frame = cv2.flip(frame, 1) + + # get rgb image to pass that on selfie segmentation + rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + # process it! + results = selfie_seg.process(rgb) + + # get the condition from result's segmentation mask + condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1 + + # apply background change if condition matches + output_image = np.where(condition, frame, bg_image) + return output_image + # show the output + # cv2.imshow('Background Change with MP', output_image) + # key = cv2.waitKey(5) & 0xFF + # + # if key == ord('n'): + # back1+=1 + # if back1 ==img+1: + # back1=1 + # path = f'../assest/background/back{back1}.png' + # + # + # if key == ord('q'): + # cam.release() + # cv2.destroyAllWindows() # wait until any key is pressed + cam.release() + cv2.destroyAllWindows() + + + diff --git a/background_live/filter_background_live.py b/background_live/filter_background_live.py new file mode 100644 index 0000000..ca161f6 --- /dev/null +++ b/background_live/filter_background_live.py @@ -0,0 +1,10 @@ +from background_live import background +from request_image import add_path,check_image + +type_filter = (input('add new background [yes,no] ? ')) +type_blur=int(input('number for blur start from 1 : ')) +if type_filter == 'yes': + add_path(input('path image : ')) + +img =check_image() +background('../assest/background/back1.png',type_blur,img) diff --git a/background_live/request_image.py b/background_live/request_image.py new file mode 100644 index 0000000..f00ceaf --- /dev/null +++ b/background_live/request_image.py @@ -0,0 +1,37 @@ +import requests +from os.path import exists +import os +import cv2 +def add_path(image_path): + path_image=file_exist() + if ':' in image_path and "\\" in image_path and 'http' not in image_path: + print('hello') + img = cv2.imread(image_path) + cv2.imwrite(path_image, img) + else: + print('hi') + r= requests.get(image_path) + with open(path_image,'wb') as f : + f.write(r.content) + + +def file_exist(): + img = 1 + path_image = f'../assest/background/back{img}.png' + file_exists = exists(path_image) + while file_exists : + img+=1 + path_image = f'../assest/background/back{img}.png' + file_exists = exists(path_image) + return path_image + +def check_image(): + img = 1 + path_image = f'../assest/background/back{img}.png' + file_exists = exists(path_image) + while file_exists: + img += 1 + path_image = f'../assest/background/back{img}.png' + file_exists = exists(path_image) + return img-1 + diff --git a/filters/__pycache__/dogfilter.cpython-39.pyc b/filters/__pycache__/dogfilter.cpython-39.pyc deleted file mode 100644 index 7d42965..0000000 Binary files a/filters/__pycache__/dogfilter.cpython-39.pyc and /dev/null differ diff --git a/filters/__pycache__/filter1.cpython-39.pyc b/filters/__pycache__/filter1.cpython-39.pyc deleted file mode 100644 index 3d23894..0000000 Binary files a/filters/__pycache__/filter1.cpython-39.pyc and /dev/null differ diff --git a/filters/dogfilter.py b/filters/dogfilter.py deleted file mode 100644 index 964de52..0000000 --- a/filters/dogfilter.py +++ 
/dev/null @@ -1,58 +0,0 @@ -import cv2 -import numpy as np -import dlib -from math import hypot -filter_image = cv2.imread("assest\dogfilter.png") -# Loading Face detector -detector = dlib.get_frontal_face_detector() -predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat") - -def filteringdog(cap,rows, cols): - filter1 = np.zeros((rows, cols), np.uint8) - _, frame = cap.read() - filter1.fill(0) - gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - faces = detector(frame) - # if faces: - try: - - for face in faces: - landmarks = predictor(gray_frame, face) - - # filter coordinates - # top_filter = (landmarks.part(27).x, landmarks.part(24).y) - center_filter = (landmarks.part(27).x, landmarks.part(27).y) - left_filter = (landmarks.part(2).x, landmarks.part(2).y) - right_filter = (landmarks.part(14).x, landmarks.part(14).y) - - filter_width = int(hypot(left_filter[0] - right_filter[0], - left_filter[1] - right_filter[1]) * 1.3) - filter_height = int(filter_width *1.5) - - # New filter position - top_left = (int(center_filter[0] - filter_width / 2), - int(center_filter[1] - filter_height / 2)) - bottom_right = (int(center_filter[0] + filter_width / 2), - int(center_filter[1] + filter_height / 2)) - - # Adding the new filter - # coloring - filtery = cv2.resize(filter_image, (filter_width, filter_height)) - filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY) - _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV) - - filter_area = frame[top_left[1]: top_left[1] + filter_height, - top_left[0]: top_left[0] + filter_width] - filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1) - final_filter = cv2.add(filter_area_no_filter, filtery) - - frame[top_left[1]: top_left[1] + filter_height, - top_left[0]: top_left[0] + filter_width] = final_filter - - cv2.imshow("Frame", frame) - except: - _, frame_f = cap.read() - cv2.imshow("Frame", frame_f) - # else: - # _, frame_f = cap.read() - # cv2.imshow("Frame", frame_f) diff --git a/filters/filter1.py b/filters/filter1.py deleted file mode 100644 index 1329588..0000000 --- a/filters/filter1.py +++ /dev/null @@ -1,58 +0,0 @@ -import cv2 -import numpy as np -import dlib -from math import hypot -filter_image = cv2.imread("assest\image1.png") -# Loading Face detector -detector = dlib.get_frontal_face_detector() -predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat") - -def filtering(cap,rows, cols): - filter1 = np.zeros((rows, cols), np.uint8) - _, frame = cap.read() - filter1.fill(0) - gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - faces = detector(frame) - if faces: - try: - - for face in faces: - landmarks = predictor(gray_frame, face) - - # filter coordinates - # top_filter = (landmarks.part(27).x, landmarks.part(24).y) - center_filter = (landmarks.part(32).x, landmarks.part(24).y) - left_filter = (landmarks.part(3).x, landmarks.part(2).y) - right_filter = (landmarks.part(13).x, landmarks.part(13).y) - - filter_width = int(hypot(left_filter[0] - right_filter[0], - left_filter[1] - right_filter[1]) * 1.7) - filter_height = int(filter_width *1.1) - - # New filter position - top_left = (int(center_filter[0] - filter_width / 2), - int(center_filter[1] - filter_height / 2)) - bottom_right = (int(center_filter[0] + filter_width / 2), - int(center_filter[1] + filter_height / 2)) - - # Adding the new filter - # coloring - filtery = cv2.resize(filter_image, (filter_width, filter_height)) - filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY) - _, filter1 = 
cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV) - - filter_area = frame[top_left[1]: top_left[1] + filter_height, - top_left[0]: top_left[0] + filter_width] - filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1) - final_filter = cv2.add(filter_area_no_filter, filtery) - - frame[top_left[1]: top_left[1] + filter_height, - top_left[0]: top_left[0] + filter_width] = final_filter - - cv2.imshow("Frame", frame) - except: - _, frame_f = cap.read() - cv2.imshow("Frame", frame_f) - else: - _, frame_f = cap.read() - cv2.imshow("Frame", frame_f) diff --git a/filters_image/change_filter.py b/filters_image/change_filter.py new file mode 100644 index 0000000..f7a0f55 --- /dev/null +++ b/filters_image/change_filter.py @@ -0,0 +1,58 @@ +## jaso file for adding + +change_filter =[ {'filter': ['../assest/f21.png',"../assest/beard.png"], + 'center': [27,8], + 'width': [1.2,1.1], + 'height': [0.33,1.5], + 'up': [-5,0], + 'left': [0.5,0], + 'counte': 7}, + {'filter': ['../assest/f31.png', "../assest/f22.png", "../assest/f33.png"], + 'center': [27, 8, 27], + 'width': [1.3, 1.1, 1], + 'height': [0.9, 1, 0.33], + 'up': [140, -70, -20], + 'left': [0.5, 0, 0], + 'counte': 7}, + + {'filter':['../assest/f02.png','../assest/f04.png'], + 'center':[27,27],#27 + 'width':[1.2,3],#1.5 + 'height':[0.6,0.75],#0.4 + 'up':[30,0],#-7 + 'left':[0,0], + 'counte':1}, + + {'filter': ["../assest/yas6.png","../assest/yas8.png","../assest/yas5.png",], + 'center': [51,27,51,], + 'width': [0.8,1.2,0.5,], + 'height': [0.5,0.33,1,], + 'up': [5,-5,-15,], + 'left': [0,0.5,-40,], + 'counte':2}, + {'filter': ['../assest/moustache2.png'], + 'center': [51], + 'width': [1], + 'height': [0.5], + 'up': [1], + 'left': [-3], + 'counte': 7}, + {'filter': ["../assest/funny-sunglass.png"], + 'center': [27], + 'width': [1.05], + 'height': [0.33], + 'up': [0], + 'left': [0], + 'counte': 6}, + + {'filter': ["../assest/tongue.png"], + 'center': [57], + 'width': [0.6], + 'height': [1.2], + 'up': [-25], + 'left': [0], + 'counte': 0}, + + + ] + diff --git a/filters_image/image_filtering_face.py b/filters_image/image_filtering_face.py new file mode 100644 index 0000000..d30a8b4 --- /dev/null +++ b/filters_image/image_filtering_face.py @@ -0,0 +1,72 @@ +import cv2 +import numpy as np +import dlib +from math import hypot +# from filters_image.change_filter_image import change_filter +import os + +def image_filtering_face(path_filter,path_img,center,width,height,up,left,counte=0): + + + filter_image = [] + for i in path_filter: + filter_image.append(cv2.imread(i)) + + image = cv2.imread(path_img) + rows, cols, _ = image.shape + filter1 = np.zeros((rows, cols), np.uint8) + filter1.fill(0) + gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + detector = dlib.get_frontal_face_detector() + faces = detector(image) + if faces: + # try: + for i in range(len(path_filter)): + test = filter(image,gray_image,faces,filter_image[i],center[i],width[i],height[i],up[i],left[i]) + return test + # except: + # image = cv2.imread(path_img) + + # return image + + +def filter(image,gray_frame,faces,filter_image1,center,width,height,up=0,left=0): + predictor_path = "../assest/shape_predictor_68_face_landmarks.dat" + predictor = dlib.shape_predictor(predictor_path) + for face in faces: + try: + landmarks = predictor(gray_frame, face) + + center_filter = (landmarks.part(center).x-left, landmarks.part(center).y-up) + left_filter = (landmarks.part(4).x, landmarks.part(4).y) + right_filter = (landmarks.part(14).x, landmarks.part(14).y) + + 
filter_width = int(hypot(left_filter[0] - right_filter[0], + left_filter[1] - right_filter[1]) * width) + filter_height = int(filter_width * height) + + # New filter position + top_left = (int(center_filter[0] - filter_width / 2), + int(center_filter[1] - filter_height / 2)) + bottom_right = (int(center_filter[0] + filter_width / 2), + int(center_filter[1] + filter_height / 2)) + + # Adding the new filter + filtery = cv2.resize(filter_image1, (filter_width, filter_height)) + filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY) + _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV) + + filter_area = image[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width] + filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1) + final_filter = cv2.add(filter_area_no_filter, filtery) + + image[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width,:] = final_filter + print("filter1") + except: + print("except") + return image + # cv2.imshow("Frame", image) + # key = cv2.waitKey(0) + diff --git a/filters_live/change_filter.py b/filters_live/change_filter.py new file mode 100644 index 0000000..341f679 --- /dev/null +++ b/filters_live/change_filter.py @@ -0,0 +1,55 @@ +## jaso file for adding + +change_filter =[{'filter':['../assest/f02.png','../assest/f04.png'], + 'center':[27,27],#27 + 'width':[1.2,4],#1.5 + 'height':[0.6,0.75],#0.4 + 'up':[20,0],#-7 + 'left':[0,0], + 'counte':1}, + {'filter': ['../assest/moustache2.png'], + 'center': [51], + 'width': [1], + 'height': [0.5], + 'up': [1], + 'left': [-3], + 'counte': 7}, + {'filter': ["../assest/yas6.png","../assest/yas8.png","../assest/yas5.png",], + 'center': [51,27,51,], + 'width': [0.8,1.2,0.5,], + 'height': [0.5,0.33,1,], + 'up': [5,-5,-15,], + 'left': [0,0.5,-40,], + 'counte':2}, + {'filter': ["../assest/funny-sunglass.png"], + 'center': [27], + 'width': [1.05], + 'height': [0.33], + 'up': [0], + 'left': [0], + 'counte': 6}, + {'filter': ['../assest/f21.png',"../assest/beard.png"], + 'center': [27,8], + 'width': [1.2,1.1], + 'height': [0.33,1.5], + 'up': [-5,0], + 'left': [0.5,0], + 'counte': 7}, + {'filter': ['../assest/f31.png',"../assest/f22.png","../assest/f33.png"], + 'center': [27,8,27], + 'width': [1.3,1.1,1], + 'height': [0.9,1,0.33], + 'up': [90,-70,0], + 'left': [0.5,0,0], + 'counte': 7}, + {'filter': ["../assest/tongue.png"], + 'center': [57], + 'width': [0.6], + 'height': [1.2], + 'up': [-25], + 'left': [0], + 'counte': 0}, + + + ] + diff --git a/filters_live/glasses_black.py b/filters_live/glasses_black.py new file mode 100644 index 0000000..ba38ef4 --- /dev/null +++ b/filters_live/glasses_black.py @@ -0,0 +1,62 @@ +import cv2 +import numpy as np +import dlib +from math import hypot +filter_image = cv2.imread("assest/sunglasses2.png") + +# Loading Face detector +detector = dlib.get_frontal_face_detector() +predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat") + +def filteringmouse(cap,rows, cols): + filter1 = np.zeros((rows, cols), np.uint8) + _, frame = cap.read() + filter1.fill(0) + gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + faces = detector(frame) + + # if faces: + try: + filter(frame,gray_frame,faces,filter_image,27,27,1,0.35) + # filter(frame, gray_frame, faces, filter_image3,27,27,1.5,2,25) + except: + _, frame_f = cap.read() + cv2.imshow("Frame", frame_f) + # else: + # _, frame_f = cap.read() + # cv2.imshow("Frame", frame_f) +def 
filter(frame,gray_frame,faces,filter_image1,X,Y,width,height,above=0,left=0): + for face in faces: + landmarks = predictor(gray_frame, face) + + # filter coordinates + # top_filter = (landmarks.part(27).x+10, landmarks.part(24).y+10) + center_filter = (landmarks.part(X).x-left, landmarks.part(Y).y-above) + left_filter = (landmarks.part(0).x, landmarks.part(0).y) + right_filter = (landmarks.part(16).x, landmarks.part(16).y) + + filter_width = int(hypot(left_filter[0] - right_filter[0], + left_filter[1] - right_filter[1]) * width) + filter_height = int(filter_width * height) + + # New filter position + top_left = (int(center_filter[0] - filter_width / 2), + int(center_filter[1] - filter_height /2 )) + bottom_right = (int(center_filter[0] + filter_width / 2), + int(center_filter[1] + filter_height / 2)) + + # Adding the new filter + # coloring + filtery = cv2.resize(filter_image1, (filter_width, filter_height)) + filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY) + _, filter1 = cv2.threshold(filtery_gray, 125, 225, cv2.THRESH_BINARY) + + filter_area = frame[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width] + filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1) + # final_filter = cv2.add(filter_area_no_filter, filtery) + + frame[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width,:] = filter_area_no_filter + + cv2.imshow("Frame", frame) \ No newline at end of file diff --git a/filters_live/gost.py b/filters_live/gost.py new file mode 100644 index 0000000..ac67b37 --- /dev/null +++ b/filters_live/gost.py @@ -0,0 +1,30 @@ +import cv2 +import numpy as np +import dlib +from math import hypot +filter_image = cv2.imread("assest/moustache2.png") + +# Loading Face detector +detector = dlib.get_frontal_face_detector() +predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat") + +def filteringmouse(cap,rows, cols): + filter1 = np.zeros((rows, cols), np.uint8) + _, frame = cap.read() + filter1.fill(0) + gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + faces = detector(frame) + + # if faces: + try: + filter(frame) + # filter(frame, gray_frame, faces, filter_image3,27,27,1.5,2,25) + except: + _, frame_f = cap.read() + cv2.imshow("Frame", frame_f) + # else: + # _, frame_f = cap.read() + # cv2.imshow("Frame", frame_f) +def filter(frame): + invert = cv2.bitwise_not(frame) + cv2.imshow("Frame", invert) \ No newline at end of file diff --git a/filters_live/mustache_filter.py b/filters_live/mustache_filter.py new file mode 100644 index 0000000..fb91c80 --- /dev/null +++ b/filters_live/mustache_filter.py @@ -0,0 +1,62 @@ +import cv2 +import numpy as np +import dlib +from math import hypot +filter_image = cv2.imread("assest/mustach.png") + +# Loading Face detector +detector = dlib.get_frontal_face_detector() +predictor = dlib.shape_predictor("assest\shape_predictor_68_face_landmarks.dat") + +def filteringmouse(cap,rows, cols): + filter1 = np.zeros((rows, cols), np.uint8) + _, frame = cap.read() + filter1.fill(0) + gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + faces = detector(frame) + + # if faces: + try: + filter(frame,gray_frame,faces,filter_image,57,57,1,1) + # filter(frame, gray_frame, faces, filter_image3,27,27,1.5,2,25) + except: + _, frame_f = cap.read() + cv2.imshow("Frame", frame_f) + # else: + # _, frame_f = cap.read() + # cv2.imshow("Frame", frame_f) +def filter(frame,gray_frame,faces,filter_image1,X,Y,width,height,above=0,left=0): + for face in faces: + landmarks = 
predictor(gray_frame, face) + + # filter coordinates + # top_filter = (landmarks.part(27).x+10, landmarks.part(24).y+10) + center_filter = (landmarks.part(X).x-left, landmarks.part(Y).y-above) + left_filter = (landmarks.part(4).x, landmarks.part(4).y) + right_filter = (landmarks.part(12).x, landmarks.part(12).y) + + filter_width = int(hypot(left_filter[0] - right_filter[0], + left_filter[1] - right_filter[1]) * width) + filter_height = int(filter_width * height) + + # New filter position + top_left = (int(center_filter[0] - filter_width / 2), + int(center_filter[1] - filter_height /2 )) + bottom_right = (int(center_filter[0] + filter_width / 2), + int(center_filter[1] + filter_height / 2)) + + # Adding the new filter + # coloring + filtery = cv2.resize(filter_image1, (filter_width, filter_height)) + filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY) + _, filter1 = cv2.threshold(filtery_gray, 125, 225, cv2.THRESH_BINARY) + + filter_area = frame[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width] + filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1) + # final_filter = cv2.add(filter_area_no_filter, filtery) + + frame[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width,:] = filter_area_no_filter + + cv2.imshow("Frame", frame) \ No newline at end of file diff --git a/filters_live/shape_predictor_68_face_landmarks.dat b/filters_live/shape_predictor_68_face_landmarks.dat new file mode 100644 index 0000000..e0ec20d Binary files /dev/null and b/filters_live/shape_predictor_68_face_landmarks.dat differ diff --git a/filters_live/video_filtering_face.py b/filters_live/video_filtering_face.py new file mode 100644 index 0000000..bad3ebb --- /dev/null +++ b/filters_live/video_filtering_face.py @@ -0,0 +1,123 @@ +import cv2 +import numpy as np +import dlib +from math import hypot +from filters_live.change_filter import change_filter +import mediapipe as mp + +import os +cap = cv2.VideoCapture(0) +detector = dlib.get_frontal_face_detector() +predictor = dlib.shape_predictor("../filters_live/shape_predictor_68_face_landmarks.dat") +counte=0 +def video_filtering_face(path,center,width,height,up,left,path_back,blur,filter_face,background_face): + + filter_image = [] + for i in path: + filter_image.append(cv2.imread(i)) + # + _, frame = cap.read() + rows, cols, _ = frame.shape + filter1 = np.zeros((rows, cols), np.uint8) + + filter1.fill(0) + gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + faces = detector(frame) + if faces: + try: + if filter_face: + for i in range(len(path)): + filter(frame,gray_frame,faces,filter_image[i],center[i],width[i],height[i],up[i],left[i]) + if background_face: + frame =background(frame,path_back,blur ) + + except: + _, frame = cap.read() + else: + _, frame = cap.read() + + return frame + + +def filter(frame,gray_frame,faces,filter_image1,center,width,height,up=0,left=0): + for face in faces: + landmarks = predictor(gray_frame, face) + + + center_filter = (landmarks.part(center).x-left, landmarks.part(center).y-up) + left_filter = (landmarks.part(4).x, landmarks.part(4).y) + right_filter = (landmarks.part(14).x, landmarks.part(14).y) + + filter_width = int(hypot(left_filter[0] - right_filter[0], + left_filter[1] - right_filter[1]) * width) + filter_height = int(filter_width * height) + + top_left = (int(center_filter[0] - filter_width / 2), + int(center_filter[1] - filter_height / 2)) + bottom_right = (int(center_filter[0] + filter_width / 2), + int(center_filter[1] + 
filter_height / 2)) + + + filtery = cv2.resize(filter_image1, (filter_width, filter_height)) + filtery_gray = cv2.cvtColor(filtery, cv2.COLOR_BGR2GRAY) + _, filter1 = cv2.threshold(filtery_gray, 25, 255, cv2.THRESH_BINARY_INV) + + filter_area = frame[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width] + + filter_area_no_filter = cv2.bitwise_and(filter_area, filter_area, mask=filter1) + final_filter = cv2.add(filter_area_no_filter, filtery) + + frame[top_left[1]: top_left[1] + filter_height, + top_left[0]: top_left[0] + filter_width,:] = final_filter + + # cv2.imshow("Frame", frame) + +def change_image(i): + video_filtering_face( + change_filter[i]['filter'], + change_filter[i]['center'], + change_filter[i]['width'], + change_filter[i]['height'], + change_filter[i]['up'], + change_filter[i]['left'], + change_filter[i]['counte'] + ) + +mp_selfie_segmentation = mp.solutions.selfie_segmentation +fsize = (480, 640) +def background(frame,path,blur =1): + + + scene = cv2.imread(path) # read the scene image + scene = cv2.blur(scene, (blur, blur)) + scene = cv2.resize(scene, (fsize[1], fsize[0])) # resize scene to the size of frame + with mp_selfie_segmentation.SelfieSegmentation(model_selection=1) as selfie_seg: + bg_image = scene + # ret, frame = cap.read() + # if not ret: + # print("Error reading frame...") + + frame = cv2.resize(frame, (fsize[1], fsize[0])) + + # flip it to look like selfie camera + # frame = cv2.flip(frame, 1) + + # get rgb image to pass that on selfie segmentation + rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + # process it! + results = selfie_seg.process(rgb) + + # get the condition from result's segmentation mask + condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1 + + # apply background change if condition matches + output_image = np.where(condition, frame, bg_image) + return output_image + + cam.release() + cv2.destroyAllWindows() + +if __name__ == "__main__": + video_filtering_face(["../assest/tongue.png"],[57],[0.6],[1.2],[-25],[0]) \ No newline at end of file diff --git a/main.py b/main.py index 8abb01a..e69de29 100644 --- a/main.py +++ b/main.py @@ -1,16 +0,0 @@ -import cv2 -import numpy as np -import dlib -from filters.filter1 import filtering -from filters.dogfilter import filteringdog -# Loading Camera and Nose image and Creating mask -cap = cv2.VideoCapture(0) -_, frame = cap.read() -# print(frame) -rows, cols, _ = frame.shape -filter1 = np.zeros((rows, cols), np.uint8) -while True: - filteringdog(cap,rows, cols) - key = cv2.waitKey(1) - if key == 27: - break \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 44cd5b5..c32c3e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ description = "" authors = ["hayaa1234 "] [tool.poetry.dependencies] -python = "^3.9" +python = "~3.9" [tool.poetry.dev-dependencies]
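
The filter functions repeated across filters_image/ and filters_live/ all share one compositing step: size the PNG from the distance between two jaw landmarks, centre it on a chosen landmark, treat its near-black background as transparent via a threshold mask, and add it into the frame. Below is a minimal standalone sketch of that step; the default landmark indices, scale factors and threshold mirror values used in filters_live/video_filtering_face.py, while the predictor path is an illustrative assumption.

import cv2
import dlib
from math import hypot

# Path is an assumption for illustration; the repo keeps its copy under ../assest/.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

def overlay_filter(frame, overlay_bgr, center_idx=27, width_scale=1.2,
                   height_ratio=0.33, up=0, left=0):
    """Composite a black-backed PNG onto every detected face, sized from the jaw landmarks."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for face in detector(frame):
        lm = predictor(gray, face)
        # Width follows the distance between the outer jaw landmarks 4 and 14, as in the diff.
        w = int(hypot(lm.part(4).x - lm.part(14).x, lm.part(4).y - lm.part(14).y) * width_scale)
        h = int(w * height_ratio)
        cx, cy = lm.part(center_idx).x - left, lm.part(center_idx).y - up
        x0, y0 = int(cx - w / 2), int(cy - h / 2)
        if w <= 0 or h <= 0 or x0 < 0 or y0 < 0 or x0 + w > frame.shape[1] or y0 + h > frame.shape[0]:
            continue  # overlay would leave the frame; the diff handles this case with a bare except
        resized = cv2.resize(overlay_bgr, (w, h))
        overlay_gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        # Near-black pixels of the PNG are treated as transparent background.
        _, keep_frame = cv2.threshold(overlay_gray, 25, 255, cv2.THRESH_BINARY_INV)
        roi = frame[y0:y0 + h, x0:x0 + w]
        hole = cv2.bitwise_and(roi, roi, mask=keep_frame)      # frame with the filter area blacked out
        frame[y0:y0 + h, x0:x0 + w] = cv2.add(hole, resized)   # drop the resized filter into the hole
    return frame

A call such as overlay_filter(frame, cv2.imread("../assest/funny-sunglass.png"), 27, 1.05, 0.33) should reproduce the sunglasses entry of change_filter; each named filter in the config lists is just a different parameterisation of this same compositing step.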
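
background() in filters_live/video_filtering_face.py swaps the scene behind the person using MediaPipe selfie segmentation. The following is a condensed sketch of the same idea; the output size, mask threshold and scene path are assumptions for illustration.

import cv2
import numpy as np
import mediapipe as mp

mp_selfie_segmentation = mp.solutions.selfie_segmentation

def replace_background(frame, scene_path, blur=1, size=(640, 480), threshold=0.1):
    """Return frame with everything outside the detected person replaced by the scene image."""
    scene = cv2.imread(scene_path)
    scene = cv2.blur(scene, (blur, blur))          # blur=1 leaves the scene effectively untouched
    scene = cv2.resize(scene, size)                # size is (width, height) for cv2.resize
    frame = cv2.resize(frame, size)
    with mp_selfie_segmentation.SelfieSegmentation(model_selection=1) as seg:
        results = seg.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    # The mask is a float confidence map; stack it to 3 channels so np.where can pick per-pixel BGR values.
    condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > threshold
    return np.where(condition, frame, scene)

Constructing SelfieSegmentation inside the call, as both the diff and this sketch do, reloads the model on every frame; keeping a single instance alive for the lifetime of the camera loop would likely be noticeably faster.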
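
How a config entry feeds the live pipeline is easiest to see in a small driver loop. The sketch below is an assumption about intended usage rather than code from the diff: the dict keys come from filters_live/change_filter.py and the keyword names from the video_filtering_face signature, while the backdrop path, blur value and flag settings are placeholders.

import cv2
from filters_live.change_filter import change_filter
from filters_live.video_filtering_face import video_filtering_face

entry = change_filter[3]                       # e.g. the funny-sunglass entry in the live config
while True:
    frame = video_filtering_face(
        entry['filter'], entry['center'], entry['width'],
        entry['height'], entry['up'], entry['left'],
        path_back="../assest/Cat03.jpg",       # hypothetical backdrop; unused while background_face is False
        blur=5,
        filter_face=True,                      # draw the landmark filters
        background_face=False,                 # keep the real background
    )
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) == 27:                   # Esc closes the preview, as in the removed main.py
        break

Because video_filtering_face.py opens cv2.VideoCapture(0) at module level, each call grabs one webcam frame, so a loop like this gives a live preview.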