Skip to content

Commit

Permalink
Major Clean UP
Browse files Browse the repository at this point in the history
  • Loading branch information
murtazahassan committed Sep 7, 2023
0 parents commit c917488
Show file tree
Hide file tree
Showing 45 changed files with 2,490 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
dist
venv
.idea
13 changes: 13 additions & 0 deletions Examples/ClassificationModuleExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from cvzone.ClassificationModule import Classifier
import cv2
import os

# Example: run a Keras image classifier (e.g. mask / no-mask) on a live camera feed.

cap = cv2.VideoCapture(2)  # Initialize video capture (third camera, 0-based index)
path = "C:/Users/USER/Documents/maskModel/"
# os.path.join avoids the doubled slash the original f-strings produced
# (`path` already ends with '/', and f'{path}/...' added a second one).
maskClassifier = Classifier(os.path.join(path, 'keras_model.h5'),
                            os.path.join(path, 'labels.txt'))

while True:
    success, img = cap.read()  # Capture frame-by-frame
    if not success:
        # Camera unplugged or frame grab failed; stop instead of classifying None.
        break
    prediction = maskClassifier.getPrediction(img)
    print(prediction)  # Print prediction result
    cv2.imshow("Image", img)
    cv2.waitKey(1)  # Wait 1 ms so the window refreshes
37 changes: 37 additions & 0 deletions Examples/ColorModuleExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
import cvzone
import cv2

# Detect a custom HSV color range in a live camera feed using cvzone's ColorFinder.

# trackBar=True opens an interactive window for tuning the HSV limits live.
colorDetector = cvzone.ColorFinder(trackBar=True)

# Open the third camera (index 2) and request a 640x480 frame size.
videoFeed = cv2.VideoCapture(2)
videoFeed.set(3, 640)
videoFeed.set(4, 480)

# Pre-tuned HSV limits for an orange hue:
# hmin/smin/vmin are the lower Hue/Saturation/Value bounds,
# hmax/smax/vmax the upper bounds.
hsvVals = {'hmin': 10, 'smin': 55, 'vmin': 215, 'hmax': 42, 'smax': 255, 'vmax': 255}

while True:
    # Grab the next frame from the camera.
    grabbed, frame = videoFeed.read()

    # update() returns the color-isolated image and the raw binary mask.
    orangeOnly, binaryMask = colorDetector.update(frame, hsvVals)

    # Side-by-side view: original, color-isolated, and mask images.
    combined = cvzone.stackImages([frame, orangeOnly, binaryMask], 3, 1)
    cv2.imshow("Image Stack", combined)

    # 'q' quits the preview loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
27 changes: 27 additions & 0 deletions Examples/CornerRectangleExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import cv2
import cvzone  # cvzone provides the corner-styled rectangle helper

# Demo: draw cvzone's corner-styled rectangle on top of a live webcam feed.

webcam = cv2.VideoCapture(2)  # third camera (0-based index)

while True:
    # grabbed: whether a frame was captured; frame: the image itself
    grabbed, frame = webcam.read()

    # Rectangle at (200, 200), 300 wide by 200 tall, with highlighted corners.
    frame = cvzone.cornerRect(frame,
                              (200, 200, 300, 200),  # x, y, width, height
                              l=30,                  # corner edge length
                              t=5,                   # corner edge thickness
                              rt=1,                  # rectangle outline thickness
                              colorR=(255, 0, 255),  # rectangle color
                              colorC=(0, 255, 0))    # corner color

    # Show the annotated frame.
    cv2.imshow("Image", frame)

    # 1 ms pause between frames keeps the window responsive.
    cv2.waitKey(1)
14 changes: 14 additions & 0 deletions Examples/DownloadImageFromURL.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import cv2
import cvzone

# Fetch two sample images from the cvzone repository: one ordinary image and
# one PNG whose alpha channel is preserved for transparent overlays.

plainImage = cvzone.downloadImageFromUrl(
    url='https://github.com/cvzone/cvzone/blob/master/Results/shapes.png?raw=true')

# keepTransparency=True retains the PNG alpha channel.
logoImage = cvzone.downloadImageFromUrl(
    url='https://github.com/cvzone/cvzone/blob/master/Results/cvzoneLogo.png?raw=true',
    keepTransparency=True)
# Enlarge the logo 3x in both dimensions.
logoImage = cv2.resize(logoImage, (0, 0), None, 3, 3)

cv2.imshow("Image Normal", plainImage)
cv2.imshow("Transparent Image", logoImage)
cv2.waitKey(0)  # block until any key is pressed
45 changes: 45 additions & 0 deletions Examples/FaceDetectionExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import cvzone
from cvzone.FaceDetectionModule import FaceDetector
import cv2

# Live face detection demo: draw the face center, confidence score and a
# corner-styled bounding box for every detected face.

camera = cv2.VideoCapture(2)  # third connected camera (0 is usually built-in)

# minDetectionCon: confidence threshold; modelSelection=0 selects the
# short-range model (~2 m), 1 would select long range (~5 m).
faceFinder = FaceDetector(minDetectionCon=0.5, modelSelection=0)

while True:
    # grabbed: whether a frame was captured; frame: the image itself
    grabbed, frame = camera.read()

    # draw=False: we render our own annotations below instead.
    frame, detections = faceFinder.findFaces(frame, draw=False)

    if detections:
        for detection in detections:
            # Each detection dict carries 'id', 'bbox', 'score' and 'center'.

            # ---- Get Data ---- #
            centerPoint = detection["center"]
            x, y, w, h = detection['bbox']
            confidence = int(detection['score'][0] * 100)

            # ---- Draw Data ---- #
            cv2.circle(frame, centerPoint, 5, (255, 0, 255), cv2.FILLED)
            cvzone.putTextRect(frame, f'{confidence}%', (x, y - 10))
            cvzone.cornerRect(frame, (x, y, w, h))

    # Show the annotated frame and keep the window responsive.
    cv2.imshow("Image", frame)
    cv2.waitKey(1)
49 changes: 49 additions & 0 deletions Examples/FaceMeshExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
from cvzone.FaceMeshModule import FaceMeshDetector
import cv2

# Track up to two face meshes and report the vertical opening of the left eye.

camera = cv2.VideoCapture(2)  # third connected camera (0 is usually built-in)

# staticMode=False: detect once and then track between frames;
# maxFaces caps how many faces are followed; both confidence thresholds at 0.5.
meshDetector = FaceMeshDetector(staticMode=False, maxFaces=2, minDetectionCon=0.5, minTrackCon=0.5)

while True:
    # grabbed: whether a frame was captured; frame: the image itself
    grabbed, frame = camera.read()

    # draw=True overlays the detected mesh onto the frame.
    frame, detectedFaces = meshDetector.findFaceMesh(frame, draw=True)

    if detectedFaces:
        for facePoints in detectedFaces:
            # Landmark 159 sits directly above the left eye, 23 directly below.
            upperLid = facePoints[159]
            lowerLid = facePoints[23]

            # Pixel distance between the two lids — a proxy for eye openness;
            # info carries the extra coordinate data from findDistance.
            eyeGap, info = meshDetector.findDistance(upperLid, lowerLid)

            # Print the vertical distance for debugging/inspection.
            print(eyeGap)

    # Show the annotated frame and keep the window responsive.
    cv2.imshow("Image", frame)
    cv2.waitKey(1)
34 changes: 34 additions & 0 deletions Examples/FindCountrousExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import cv2
import cvzone
import numpy as np

# Contour-finding demo on a downloaded shapes image: first keep every contour,
# then keep only those approximated by 3 or 4 corner points.

shapesImg = cvzone.downloadImageFromUrl(
    url='https://github.com/cvzone/cvzone/blob/master/Results/shapes.png?raw=true')

# Canny edges, thickened by one dilation pass so contour outlines close cleanly.
edges = cv2.Canny(shapesImg, 50, 150)
thickEdges = cv2.dilate(edges, np.ones((5, 5), np.uint8), iterations=1)

# Every contour above 1000 px area, sorted, with no corner-count filtering.
imgContours, conFound = cvzone.findContours(
    shapesImg, thickEdges, minArea=1000, sort=True,
    filter=None, drawCon=True, c=(255, 0, 0), ct=(255, 0, 255),
    retrType=cv2.RETR_EXTERNAL, approxType=cv2.CHAIN_APPROX_NONE)

# Same search, restricted to shapes with 3 or 4 corners (triangles/quads).
imgContoursFiltered, conFoundFiltered = cvzone.findContours(
    shapesImg, thickEdges, minArea=1000, sort=True,
    filter=[3, 4], drawCon=True, c=(255, 0, 0), ct=(255, 0, 255),
    retrType=cv2.RETR_EXTERNAL, approxType=cv2.CHAIN_APPROX_NONE)

# Show both result images and wait for a keypress before closing.
cv2.imshow("imgContours", imgContours)
cv2.imshow("imgContoursFiltered", imgContoursFiltered)
cv2.waitKey(0)
26 changes: 26 additions & 0 deletions Examples/FpsExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import cvzone
import cv2

# Overlay a smoothed frames-per-second counter on a live webcam feed.

fpsCounter = cvzone.FPS(avgCount=30)  # average over the last 30 frames

videoSource = cv2.VideoCapture(0)
videoSource.set(cv2.CAP_PROP_FPS, 30)  # request 30 FPS from the camera

while True:
    # grabbed: whether a frame was captured; frame: the image itself
    grabbed, frame = videoSource.read()

    # update() draws the current FPS onto the frame and returns both the
    # numeric value and the annotated image.
    fps, frame = fpsCounter.update(frame, pos=(20, 50),
                                   bgColor=(255, 0, 255), textColor=(255, 255, 255),
                                   scale=3, thickness=3)

    # Show the frame with the FPS counter; 1 ms wait keeps the window live.
    cv2.imshow("Image", frame)
    cv2.waitKey(1)
62 changes: 62 additions & 0 deletions Examples/HandTrackingExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
from cvzone.HandTrackingModule import HandDetector
import cv2

# Track up to two hands, count raised fingers, and measure distances between
# fingertip landmarks, drawing everything onto the live feed.

camera = cv2.VideoCapture(2)  # third connected camera (0 is usually built-in)

# Video mode (staticMode=False), up to two hands, full model complexity,
# detection and tracking confidence thresholds both at 0.5.
handFinder = HandDetector(staticMode=False, maxHands=2, modelComplexity=1, detectionCon=0.5, minTrackCon=0.5)

while True:
    # grabbed: whether a frame was captured; frame: the image itself
    grabbed, frame = camera.read()

    # draw=True renders landmarks/outlines; flipType=True mirrors the image so
    # the Left/Right labels match the user's perspective.
    hands, frame = handFinder.findHands(frame, draw=True, flipType=True)

    if hands:
        # --- first detected hand ---
        firstHand = hands[0]
        lmListA = firstHand["lmList"]   # 21 landmark points
        boxA = firstHand["bbox"]        # bounding box (x, y, w, h)
        centerA = firstHand['center']   # center coordinates
        sideA = firstHand["type"]       # "Left" or "Right"

        # Number of raised fingers on the first hand.
        raisedA = handFinder.fingersUp(firstHand)
        print(f'H1 = {raisedA.count(1)}', end=" ")

        # Distance between index (8) and middle (12) fingertips of hand one,
        # drawn on the frame.
        span, info, frame = handFinder.findDistance(lmListA[8][0:2], lmListA[12][0:2], frame, color=(255, 0, 255),
                                                    scale=10)

        # --- optional second hand ---
        if len(hands) == 2:
            secondHand = hands[1]
            lmListB = secondHand["lmList"]
            boxB = secondHand["bbox"]
            centerB = secondHand['center']
            sideB = secondHand["type"]

            raisedB = handFinder.fingersUp(secondHand)
            print(f'H2 = {raisedB.count(1)}', end=" ")

            # Distance between the two index fingertips, drawn on the frame.
            span, info, frame = handFinder.findDistance(lmListA[8][0:2], lmListB[8][0:2], frame, color=(255, 0, 0),
                                                        scale=10)

        print(" ")  # newline after the per-frame counts

    # Show the annotated frame; 1 ms wait keeps the window responsive.
    cv2.imshow("Image", frame)
    cv2.waitKey(1)
20 changes: 20 additions & 0 deletions Examples/OverlayPNGExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import cv2
import cvzone

# Overlay a transparent PNG logo at several positions on a live camera feed.

cap = cv2.VideoCapture(2)  # third connected camera (0-based index)

# keepTransparency=True keeps the alpha channel so the logo composites cleanly.
imgPNG = cvzone.downloadImageFromUrl(
    url='https://github.com/cvzone/cvzone/blob/master/Results/cvzoneLogo.png?raw=true',
    keepTransparency=True)

while True:
    # Read image frame from camera
    success, img = cap.read()

    # BUG FIX: the original passed the untouched camera frame `img` to every
    # overlayPNG call, so the first two overlays were discarded and only the
    # last position was visible. Chaining the calls keeps all three logos.
    imgOverlay = cvzone.overlayPNG(img, imgPNG, pos=[-30, 50])  # partially off-screen
    imgOverlay = cvzone.overlayPNG(imgOverlay, imgPNG, pos=[200, 200])
    imgOverlay = cvzone.overlayPNG(imgOverlay, imgPNG, pos=[500, 400])

    cv2.imshow("imgOverlay", imgOverlay)
    cv2.waitKey(1)
52 changes: 52 additions & 0 deletions Examples/PlotModuleExample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
from cvzone.PlotModule import LivePlot
from cvzone.FaceDetectionModule import FaceDetector
import cv2
import cvzone
import math

# Live-plot two signals side by side: the x-coordinate of a detected face
# center, and a reference sine wave for comparison.

camera = cv2.VideoCapture(2)
faceFinder = FaceDetector(minDetectionCon=0.85, modelSelection=0)

# One plot tracks the face x-position (0..500), the other a +/-100 sine sweep.
xPlot = LivePlot(w=1200, yLimit=[0, 500], interval=0.01)
sinPlot = LivePlot(w=1200, yLimit=[-100, 100], interval=0.01)
sineAngle = 0

while True:
    grabbed, frame = camera.read()

    # draw=False: annotations are rendered manually below.
    frame, detections = faceFinder.findFaces(frame, draw=False)

    plottedX = 0  # default plotted value when no face is visible this frame
    if detections:
        for detection in detections:
            # Each detection dict carries 'id', 'bbox', 'score' and 'center'.

            # ---- Get Data ---- #
            faceCenter = detection["center"]
            x, y, w, h = detection['bbox']
            confidence = int(detection['score'][0] * 100)
            plottedX = faceCenter[0]

            # ---- Draw Data ---- #
            cv2.circle(frame, faceCenter, 5, (255, 0, 255), cv2.FILLED)
            cvzone.putTextRect(frame, f'{confidence}%', (x, y - 10))
            cvzone.cornerRect(frame, (x, y, w, h))

    # Advance the sine sweep one degree per frame, wrapping at 360.
    sineAngle += 1
    if sineAngle == 360:
        sineAngle = 0
    imgPlotSin = sinPlot.update(int(math.sin(math.radians(sineAngle)) * 100))
    imgPlot = xPlot.update(plottedX)

    cv2.imshow("Image Plot", imgPlot)
    cv2.imshow("Image Sin Plot", imgPlotSin)
    cv2.imshow("Image", frame)

    # 'q' quits the demo.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

0 comments on commit c917488

Please sign in to comment.