Added functionality of detect_execpt in blur_bg #98

Open · wants to merge 3 commits into base: master
Binary file modified pixellib/__pycache__/__init__.cpython-37.pyc
Binary file modified pixellib/__pycache__/deeplab.cpython-37.pyc
Binary file modified pixellib/__pycache__/semantic.cpython-37.pyc
Binary file modified pixellib/__pycache__/tune_bg.cpython-37.pyc
4 changes: 4 additions & 0 deletions pixellib/instance.py
@@ -193,6 +193,7 @@ def segmentImage(self, image_path, show_bboxes = False, segment_target_classes
        # Run detection
        if verbose is not None:
            print("Processing image...")
            print("From segmentImage")
        results = self.model.detect([new_img])


@@ -440,6 +441,7 @@ def segmentFrame(self, frame, show_bboxes = False, segment_target_classes = Non
        # Run detection
        if verbose is not None:
            print("Processing image...")
            print("segmentFrame")
        results = self.model.detect([new_frame])


@@ -703,6 +705,7 @@ def segmentImage(self, image_path, show_bboxes = False, extract_segmented_obje
        # Run detection
        if verbose is not None:
            print("Processing image...")
            print("SegmentImage")
        results = self.model.detect([new_img])


@@ -939,6 +942,7 @@ def segmentFrame(self, frame, show_bboxes = False, extract_segmented_objects =
        # Run detection
        if verbose is not None:
            print("Processing image...")
            print("segmentFrame")
        results = self.model.detect([new_frame])


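The prints added above only run when the optional verbose argument is supplied. A minimal sketch of a call that reaches that branch, assuming the documented instance_segmentation workflow; the weights file and image names below are placeholders, not part of this PR:

# Hypothetical call that exercises the verbose branch shown in this diff.
# "mask_rcnn_coco.h5" and "sample.jpg" are placeholder file names.
from pixellib.instance import instance_segmentation

segment_image = instance_segmentation()
segment_image.load_model("mask_rcnn_coco.h5")
segment_image.segmentImage("sample.jpg", show_bboxes = True,
                           output_image_name = "output.jpg", verbose = True)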
2 changes: 2 additions & 0 deletions pixellib/semantic.py
@@ -148,6 +148,7 @@ def segmentAsPascalvoc(self, image_path, process_frame = False, output_image_nam

        if verbose is not None:
            print("Processing image....")
            print("segmentAsPascalvoc")

        #run prediction
        res = self.model.predict(np.expand_dims(resized_image, 0))
@@ -408,6 +409,7 @@ def segmentAsAde20k(self, image_path, output_image_name=None,overlay=False, extr

        if verbose is not None:
            print("Processing image....")
            print("segmentAsAde20k")
        #run prediction
        res = self.model2.predict(np.expand_dims(resized_image, 0))

58 changes: 47 additions & 11 deletions pixellib/tune_bg.py
@@ -6,6 +6,11 @@
import cv2
import time
from datetime import datetime
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
from IPython.display import Image as img
from pylab import rcParams


class alter_bg():
@@ -18,6 +23,7 @@ def __init__(self, model_type = "h5"):
        global model_file
        self.model_type = model_type
        model_file = model_type


        self.model = Deeplab_xcep_pascal()

@@ -67,7 +73,7 @@ def segmentAsPascalvoc(self, image_path, process_frame = False):
            labels = obtain_segmentation(seg_image)
            labels = np.array(Image.fromarray(labels.astype('uint8')).resize((w, h)))
            labels = cv2.cvtColor(labels, cv2.COLOR_RGB2BGR)

            print("Called")
            return raw_labels, labels

        else:
@@ -232,6 +238,7 @@ def target_obj(self, segment):
    def change_bg_img(self, f_image_path,b_image_path, output_image_name = None, verbose = None, detect = None):
        if verbose is not None:
            print("processing image......")
            print("change_bg_img")

        seg_image = self.segmentAsPascalvoc(f_image_path)

@@ -255,6 +262,7 @@ def change_frame_bg(self, frame,b_image_path, verbose = None, detect = None):
    def change_frame_bg(self, frame,b_image_path, verbose = None, detect = None):
        if verbose is not None:
            print("processing frame......")
            print("change_frame_bg")

        seg_frame = self.segmentAsPascalvoc(frame, process_frame= True)

@@ -388,6 +396,7 @@ def change_camera_bg(self, cam, b_image_path, frames_per_second = None, check_fp
    def color_bg(self, image_path, colors, output_image_name = None, verbose = None, detect = None):
        if verbose is not None:
            print("processing image......")
            print("color_bg")

        seg_image = self.segmentAsPascalvoc(image_path)
        if detect is not None:
@@ -416,6 +425,7 @@ def color_bg(self, image_path, colors, output_image_name = None, verbose = None,
    def color_frame(self, frame, colors, verbose = None, detect = None):
        if verbose is not None:
            print("processing frame....")
            print("color_frame")

        seg_frame = self.segmentAsPascalvoc(frame, process_frame=True)

@@ -555,15 +565,26 @@ def color_camera(self, cam, colors, frames_per_second = None, check_fps = False,

    ##### BLUR THE BACKGROUND OF AN IMAGE #####

    def blur_bg(self, image_path,low = False, moderate = False, extreme = False, output_image_name = None, verbose = None, detect = None):
    def blur_bg(self, image_path,low = False, moderate = False, extreme = False, output_image_name = None, verbose = None, detect = None, detect_execpt = None):
        if verbose is not None:
            print("processing image......")
            print("blur_bg")

        seg_image = self.segmentAsPascalvoc(image_path)

        #print(seg_image)
        if detect_execpt is not None:
            for obj in detect_execpt:
                print(obj)
                target_class = self.target_obj(obj)
                seg_image[1][seg_image[1] != target_class] = 0

        if detect is not None:
            target_class = self.target_obj(detect)
            seg_image[1][seg_image[1] != target_class] = 0

            for obj in detect:
                print(obj)
                target_class = self.target_obj(obj)
                seg_image[1][seg_image[1] != target_class] = 0

        ori_img = cv2.imread(image_path)

@@ -575,8 +596,12 @@ def blur_bg(self, image_path,low = False, moderate = False, extreme = False, out

        if extreme == True:
            blur_img = cv2.blur(ori_img, (81,81), 0)

        out = np.where(seg_image[1], ori_img, blur_img)
        #print(blur_img)
        if detect_execpt:
            out = np.where(seg_image[1], blur_img,ori_img)
        else:
            out = np.where(seg_image[1], ori_img, blur_img)
        #cv2.imwrite("seg_image.png", seg_image[1])

        if output_image_name is not None:
            cv2.imwrite(output_image_name, out)
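For context, a minimal usage sketch of the new detect_execpt argument, assuming the standard alter_bg workflow; the weights file and image names below are placeholders, not part of this PR:

# Hypothetical usage of blur_bg with the detect_execpt parameter added above.
from pixellib.tune_bg import alter_bg

change_bg = alter_bg()
change_bg.load_pascalvoc_model("deeplabv3_xception_tf_dim_ordering_tf_kernels.h5")  # assumed weights file
# With a single class in detect_execpt, the loop above keeps only that class in
# the mask, so np.where applies the blur to that class and leaves the rest of
# the image unchanged.
change_bg.blur_bg("sample.jpg", extreme = True,
                  output_image_name = "blurred.jpg",
                  detect_execpt = ["person"], verbose = True)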
@@ -590,6 +615,7 @@ def blur_bg(self, image_path,low = False, moderate = False, extreme = False, out
    def blur_frame(self, frame,low = False, moderate = False, extreme = False, verbose = None, detect = None):
        if verbose is not None:
            print("processing frame......")
            print("blur_frame")

        seg_frame = self.segmentAsPascalvoc(frame, process_frame=True)
        if detect is not None:
@@ -613,7 +639,7 @@ def blur_frame(self, frame,low = False, moderate = False, extreme = False, verbo
    #### BLUR THE BACKGROUND OF A VIDEO #####

    def blur_video(self, video_path, low = False, moderate = False, extreme = False, frames_per_second = None,
                   output_video_name = None, detect = None):
                   output_video_name = None, detect = None, detect_except = None):
        capture = cv2.VideoCapture(video_path)
        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
@@ -630,9 +656,16 @@ def blur_video(self, video_path, low = False, moderate = False, extreme = False,

                seg_frame = self.segmentAsPascalvoc(frame, process_frame=True)
                print("No. of frames:", counter)

                if detect_except is not None:
                    for obj in detect_except:
                        target_class = self.target_obj(obj)
                        seg_frame[1][seg_frame[1] == target_class] = 1

                if detect is not None:
                    target_class = self.target_obj(detect)
                    seg_frame[1][seg_frame[1] != target_class] = 0
                    for obj in detect:
                        target_class = self.target_obj(obj)
                        seg_frame[1][seg_frame[1] != target_class] = 0

                if low == True:
                    blur_frame = cv2.blur(frame, (21,21), 0)
@@ -642,8 +675,11 @@ def blur_video(self, video_path, low = False, moderate = False, extreme = False,

                if extreme == True:
                    blur_frame = cv2.blur(frame, (81,81), 0)

                out = np.where(seg_frame[1], frame, blur_frame)
                if detect_except:
                    print("detect_except")
                    out = np.where(seg_frame[1],blur_frame, frame)
                else:
                    out = np.where(seg_frame[1], frame, blur_frame)


                output = cv2.resize(out, (width,height), interpolation=cv2.INTER_AREA)
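Likewise, a minimal sketch of how blur_video might be called with the new detect_except argument; the weights file, video path, output name, and frame rate below are placeholders, not part of this PR:

# Hypothetical usage of blur_video with the detect_except parameter added above.
from pixellib.tune_bg import alter_bg

change_bg = alter_bg()
change_bg.load_pascalvoc_model("deeplabv3_xception_tf_dim_ordering_tf_kernels.h5")  # assumed weights file
# The detect_except branch above adjusts the segmentation mask for the listed
# classes before np.where composites the blurred and original frames.
change_bg.blur_video("sample_video.mp4", extreme = True,
                     frames_per_second = 10,
                     output_video_name = "blurred_video.mp4",
                     detect_except = ["person"])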