diff --git a/Hand Gesture Volume Control/Gesture Control .py b/Hand Gesture Volume Control/Gesture Control .py
new file mode 100644
index 000000000..807e34e97
--- /dev/null
+++ b/Hand Gesture Volume Control/Gesture Control .py
@@ -0,0 +1,98 @@
+import cv2
+import math
+import time
+import numpy as np
+import Hand_detection_module as hdm
+from comtypes import CLSCTX_ALL
+from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
+
+# Get a handle to the system's default audio endpoint via pycaw.
+devices = AudioUtilities.GetSpeakers()
+interface = devices.Activate(
+    IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
+volume = interface.QueryInterface(IAudioEndpointVolume)
+
+# GetVolumeRange() returns (min dB, max dB, step dB).
+volRange = volume.GetVolumeRange()
+vMin = volRange[0]
+vMax = volRange[1]
+
+########################
+wCam, hCam = 720, 620
+########################
+
+cap = cv2.VideoCapture(0)
+cap.set(3, wCam)  # CAP_PROP_FRAME_WIDTH
+cap.set(4, hCam)  # CAP_PROP_FRAME_HEIGHT
+pTime = 0
+
+detector = hdm.HandDetection(min_detection_confidence=0.6)
+
+while True:
+    success, frame = cap.read()
+    if not success:
+        break
+
+    frame = detector.findHand(frame)
+    lmList = detector.findPosition(frame, draw=False)
+    if len(lmList) != 0:
+        # Landmark 4 is the thumb tip, landmark 8 is the index fingertip.
+        x1, y1 = lmList[4][1], lmList[4][2]
+        x2, y2 = lmList[8][1], lmList[8][2]
+        cv2.circle(frame, (x1, y1), 15, (255, 0, 255), -1)
+        cv2.circle(frame, (x2, y2), 15, (255, 0, 255), -1)
+        cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 255), 3)
+        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
+        cv2.circle(frame, (cx, cy), 15, (255, 0, 255), -1)
+
+        # Fingertip distance in pixels; the useful range is roughly (50, 280).
+        dis = math.hypot(x2 - x1, y2 - y1)
+
+        # Map the distance to the dB volume range, the bar height, and a
+        # 0-100 percentage for display.
+        finalVol = np.interp(dis, [50, 280], [vMin, vMax])
+        height = np.interp(dis, [50, 280], [400, 150])
+        volPer = np.interp(dis, [50, 280], [0, 100])
+        volume.SetMasterVolumeLevel(finalVol, None)
+
+        # Volume bar and percentage readout.
+        cv2.rectangle(frame, (50, 150), (85, 400), (0, 255, 0), 3)
+        cv2.rectangle(frame, (50, int(height)), (85, 400), (0, 255, 0), -1)
+        cv2.putText(frame, f'{int(volPer)} %', (48, 458), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
+
+        # Recolour the midpoint at the two ends of the range.
+        if dis < 50:
+            cv2.circle(frame, (cx, cy), 15, (0, 0, 255), -1)
+        if dis > 280:
+            cv2.circle(frame, (cx, cy), 15, (0, 255, 0), -1)
+
+    cTime = time.time()
+    fps = 1 / (cTime - pTime)
+    pTime = cTime
+
+    cv2.putText(frame, f'FPS : {int(fps)}', (10, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
+
+    cv2.imshow("webcam", frame)
+    if cv2.waitKey(1) & 0xFF == ord('x'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
diff --git a/Hand Gesture Volume Control/Hand_detection_module.py b/Hand Gesture Volume Control/Hand_detection_module.py
new file mode 100644
index 000000000..fe6eebb3d
--- /dev/null
+++ b/Hand Gesture Volume Control/Hand_detection_module.py
@@ -0,0 +1,87 @@
+import cv2
+import time
+import mediapipe as mp
+
+
+class HandDetection:
+    def __init__(self, min_detection_confidence=0.5):
+        self.min_detection_confidence = min_detection_confidence
+
+        self.mpHands = mp.solutions.hands
+        # Pass the confidence by keyword: positionally it would be taken as
+        # static_image_mode, the first parameter of Hands().
+        self.hands = self.mpHands.Hands(
+            min_detection_confidence=self.min_detection_confidence)
+        self.mpDraw = mp.solutions.drawing_utils
+
+    def findHand(self, frame, flag=True):
+        # MediaPipe expects RGB input; OpenCV captures BGR.
+        RGB_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        self.results = self.hands.process(RGB_frame)
+
+        if self.results.multi_hand_landmarks:
+            for multihands in self.results.multi_hand_landmarks:
+                if flag:
+                    self.mpDraw.draw_landmarks(frame, multihands, self.mpHands.HAND_CONNECTIONS)
+
+        return frame
+
+    def findPosition(self, frame, handno=0, draw=True):
+        lmList = []
+
+        if self.results.multi_hand_landmarks:
+            myHand = self.results.multi_hand_landmarks[handno]
+
+            for id, lm in enumerate(myHand.landmark):
+                # Landmark coordinates are normalised; scale them to pixels.
+                h, w, c = frame.shape
+                cx, cy = int(lm.x * w), int(lm.y * h)
+                lmList.append([id, cx, cy])
+
+                if draw:
+                    cv2.circle(frame, (cx, cy), 7, (255, 0, 9), cv2.FILLED)
+
+        return lmList
+
+
+def main():
+    pTime = 0
+
+    cap = cv2.VideoCapture(0)
+    detector = HandDetection()
+
+    while True:
+        success, frame = cap.read()
+        if not success:
+            break
+
+        frame = detector.findHand(frame)
+        lmList = detector.findPosition(frame)
+
+        if len(lmList) != 0:
+            print(lmList[4])  # thumb-tip position, for a quick sanity check
+
+        cTime = time.time()
+        fps = 1 / (cTime - pTime)
+        pTime = cTime
+
+        cv2.putText(frame, str(int(fps)), (10, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
+
+        cv2.imshow("webcam", frame)
+        if cv2.waitKey(1) & 0xFF == ord("x"):
+            break
+
+    cap.release()
+    cv2.destroyAllWindows()
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/Hand Gesture Volume Control/Raw code module.py b/Hand Gesture Volume Control/Raw code module.py
new file mode 100644
index 000000000..184d19a2e
--- /dev/null
+++ b/Hand Gesture Volume Control/Raw code module.py
@@ -0,0 +1,52 @@
+import cv2
+import time
+import mediapipe as mp
+
+mpHands = mp.solutions.hands
+hands = mpHands.Hands()
+mpDraw = mp.solutions.drawing_utils
+
+pTime = 0
+
+cap = cv2.VideoCapture(0)
+
+while True:
+    success, frame = cap.read()
+    if not success:
+        break
+
+    # MediaPipe expects RGB input; OpenCV captures BGR.
+    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    results = hands.process(rgb_frame)
+
+    if results.multi_hand_landmarks:
+        for multihands in results.multi_hand_landmarks:
+            for id, lm in enumerate(multihands.landmark):
+                # Scale normalised landmark coordinates to pixels.
+                h, w, c = frame.shape
+                cx, cy = int(lm.x * w), int(lm.y * h)
+
+                # Highlight the thumb tip (landmark 4).
+                if id == 4:
+                    cv2.circle(frame, (cx, cy), 15, (255, 255, 9), cv2.FILLED)
+
+            mpDraw.draw_landmarks(frame, multihands, mpHands.HAND_CONNECTIONS)
+
+    cTime = time.time()
+    fps = 1 / (cTime - pTime)
+    pTime = cTime
+
+    cv2.putText(frame, str(int(fps)), (10, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
+
+    cv2.imshow("webcam", frame)
+    if cv2.waitKey(1) & 0xFF == ord("x"):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/Hand Gesture Volume Control/__pycache__/Hand_detection_module.cpython-38.pyc b/Hand Gesture Volume Control/__pycache__/Hand_detection_module.cpython-38.pyc
new file mode 100644
index 000000000..984f35607
Binary files /dev/null and b/Hand Gesture Volume Control/__pycache__/Hand_detection_module.cpython-38.pyc differ
diff --git a/Hand Gesture Volume Control/readme.md b/Hand Gesture Volume Control/readme.md
new file mode 100644
index 000000000..86a284f03
--- /dev/null
+++ b/Hand Gesture Volume Control/readme.md
@@ -0,0 +1,43 @@
+# Gesture Control System
+
+## Overview
+The Gesture Control System lets users control their computer's volume with hand gestures. A webcam tracks the distance between the thumb and index finger, and that distance is mapped to a volume level. The system uses OpenCV for video capture, MediaPipe for hand detection, and the pycaw library to control the system audio.
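+
+At its core, the control step is a single interpolation: the pixel distance between the two fingertips is mapped onto pycaw's decibel volume range. A minimal sketch of that step, assuming `dis` is the measured fingertip distance in pixels (roughly 50-280 in practice) and `volume` is the pycaw endpoint created in `Gesture Control .py`:
+
+```python
+import numpy as np
+
+vMin, vMax, _ = volume.GetVolumeRange()             # endpoint range in dB
+finalVol = np.interp(dis, [50, 280], [vMin, vMax])  # interp clamps outside the range
+volume.SetMasterVolumeLevel(finalVol, None)
+```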
+
+## Features
+- **Hand Detection:** Uses MediaPipe's hand-detection module to detect and track the position of the hand in real time.
+- **Gesture Control:** Calculates the distance between the thumb and index finger and maps it to the system's audio volume.
+- **Visual Feedback:** Draws the hand landmarks and the current volume level on the webcam feed in real time.
+
+## Requirements
+- Python 3.x
+- OpenCV
+- MediaPipe
+- NumPy
+- pycaw
+- comtypes
+
+To install the required libraries, run:
+```bash
+pip install opencv-python mediapipe numpy pycaw comtypes
+```
+
+## How to Run
+
+1. Ensure that your system has a working webcam.
+2. Install the required libraries as described in the Requirements section.
+3. Run the Python script (note the spaces in the file name):
+   ```bash
+   python "Gesture Control .py"
+   ```
+4. Use your thumb and index finger to control the volume:
+   - Bring them closer together to decrease the volume.
+   - Move them apart to increase the volume.
+5. Press `x` on the keyboard to exit the program.
+
+## Advantages
+- **Contactless Control:** Volume can be adjusted without any physical contact, which is useful where hands-free operation is essential.
+- **Real-time Operation:** The system runs in real time, providing immediate feedback and control.
+
+## Limitations
+- **Lighting Conditions:** Hand-detection accuracy can vary with lighting conditions.
+- **Single-Purpose:** The system is designed specifically for volume control; extending it to other applications would require additional development.
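+
+One further caveat, with a possible fix: the script interpolates the fingertip distance straight into pycaw's decibel range, so the percentage drawn on screen will not match the Windows volume slider, which is not linear in dB. `IAudioEndpointVolume` also exposes a scalar (0.0-1.0) volume that does track the slider. A hedged sketch, reusing the `dis` and `volume` names from `Gesture Control .py`:
+
+```python
+import numpy as np
+
+volPer = np.interp(dis, [50, 280], [0, 100])  # percentage shown on screen
+# Setting the volume as a 0-1 scalar keeps it in step with the on-screen value.
+volume.SetMasterVolumeLevelScalar(volPer / 100.0, None)
+```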