
Commit 20d68b2

Merge pull request #118 from ayushete02/master
Create: Hand Volume Changer
2 parents: 61ee236 + afab7d1 · commit 20d68b2

File tree

2 files changed: +225 −0 lines


Hand_Volume/handTrackingModule.py

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
import cv2
import mediapipe as mp
import time


class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon

        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        # Mediapipe expects RGB input, while OpenCV captures BGR
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)

        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        # Convert the normalized landmark coordinates into pixel positions
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)

        return lmList


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])  # thumb tip landmark

        cTime = time.time()
        fps = 1.0 / (cTime - pTime)
        pTime = cTime

        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 255), 3)

        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
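
Note: depending on the installed mediapipe version, the positional Hands(...) call in __init__ above can bind detectionCon to the wrong parameter, because newer releases insert a model_complexity argument between max_num_hands and the confidence thresholds. A minimal sketch of the same constructor call with keyword arguments (assuming a recent mediapipe release; behaviour is otherwise unchanged):

# Keyword arguments keep the confidence values bound to the intended
# parameters even if mediapipe adds new positional parameters.
self.hands = self.mpHands.Hands(
    static_image_mode=self.mode,
    max_num_hands=self.maxHands,
    min_detection_confidence=self.detectionCon,
    min_tracking_confidence=self.trackCon,
)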

Hand_Volume/main.py

Lines changed: 154 additions & 0 deletions
@@ -0,0 +1,154 @@
wCam, hCam = 640, 480
frameR = 100  # frame reduction: margin used by np.interp below
smoothening = 7

pTime = 0
plocX, plocY = 0, 0
clocX, clocY = 0, 0

import numpy as np
import cv2
import math
import time
import handTrackingModule as htm
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume

# Get the default audio device using pycaw
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
    IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# Get the current master volume (in dB)
currentVolumeDb = volume.GetMasterVolumeLevel()

cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
detector = htm.handDetector(detectionCon=0.75)

Percent = 0

while True:
    success, img = cap.read()

    img = detector.findHands(img, draw=True)
    lmList = detector.findPosition(img, draw=False)
    tipId = [4, 8, 12, 16, 20]  # landmark ids of the five fingertips

    if len(lmList) != 0:
        # Decide which fingers are up: the thumb by x-position, the other
        # four by comparing the tip to the joint two landmarks below it.
        fingers = []
        if lmList[tipId[0]][1] > lmList[tipId[0] - 1][1]:  # thumb
            fingers.append(1)
        else:
            fingers.append(0)
        for id in range(1, len(tipId)):  # index to pinky
            if lmList[tipId[id]][2] < lmList[tipId[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)

        x1, y1 = lmList[8][1:]  # index fingertip
        x0, y0 = lmList[4][1:]  # thumb tip

        if fingers[1] == 1 and fingers[0] == 1:  # thumb and index finger up
            # Index finger: map to screen coordinates and smooth
            # (clocX/clocY are computed but not used further)
            x3 = np.interp(x1, (frameR, wCam - frameR), (0, 1080))
            y3 = np.interp(y1, (frameR, hCam - frameR), (0, 720))
            clocX = plocX + (x3 - plocX) / smoothening
            clocY = plocY + (y3 - plocY) / smoothening
            cv2.circle(img, (x1, y1), 15, (0, 0, 255), cv2.FILLED)
            plocX, plocY = clocX, clocY

            # Thumb: same mapping
            x3 = np.interp(x0, (frameR, wCam - frameR), (0, 1080))
            y3 = np.interp(y0, (frameR, hCam - frameR), (0, 720))
            clocX = plocX + (x3 - plocX) / smoothening
            clocY = plocY + (y3 - plocY) / smoothening
            cv2.circle(img, (x0, y0), 15, (0, 0, 255), cv2.FILLED)
            plocX, plocY = clocX, clocY

            # Distance between thumb tip and index tip drives the volume
            cv2.line(img, (x1, y1), (x0, y0), (0, 0, 255), 2)
            distance = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)

            # Clamp to 50-250 px and convert to a 0-100 percentage
            if distance < 50:
                distance = 50
            elif distance > 250:
                distance = 250
            Percent = round((distance - 50.0) / 2)

            # Logarithmic mapping from percentage to a dB offset above -65.25 dB
            Changevolume = round(math.log((Percent / 10) + 1) * 50 * 0.54)
            volume.SetMasterVolumeLevel(-65.25 + Changevolume, None)

    # Draw the volume bar and percentage label
    height = int(340 - (Percent * 2.0))
    cv2.rectangle(img, (570, height), (620, 340), (0, 255, 0), -1)
    cv2.rectangle(img, (570, 140), (620, 340), (255, 0, 0), 2)
    img = cv2.flip(img, 1)

    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (15, 130)
    fontScale = 1
    fontColor = (0, 0, 0)
    lineType = 2
    cv2.putText(img, f'{Percent}%', bottomLeftCornerOfText, font, fontScale, fontColor, lineType)

    cTime = time.time()
    fps = 1.0 / float(cTime - pTime)
    pTime = cTime

    cv2.imshow("image", img)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
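
The pinch distance between the thumb tip (landmark 4) and index fingertip (landmark 8) is clamped to 50-250 px and converted to Percent in 0-100, and the logarithmic term then sweeps the master level from -65.25 dB up to roughly 0 dB. A small sketch of the endpoints of that mapping, using the same formula as main.py (the -65.25 dB floor is an assumption about the device; volume.GetVolumeRange() reports the actual range):

import math

# Percent = 0   -> change = 0                        -> -65.25 dB (effectively silent)
# Percent = 100 -> change = round(ln(11) * 27) = 65  ->  -0.25 dB (near maximum)
for percent in (0, 100):
    change = round(math.log((percent / 10) + 1) * 50 * 0.54)
    print(percent, -65.25 + change)

A device-independent alternative would be volume.SetMasterVolumeLevelScalar(Percent / 100.0, None), which pycaw also exposes; the commit uses the dB-based SetMasterVolumeLevel call shown above.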

0 commit comments