Merge pull request #16 from DEVOCEAN-YOUNG-404/seungjun
Fix: Correct plugin Footer error
whateveriiwant committed Aug 22, 2023
2 parents 7e5e184 + 34c2bc7 commit 8f659cf
Showing 7 changed files with 1,476 additions and 8 deletions.
69 changes: 69 additions & 0 deletions ML/DataCollect.py
@@ -0,0 +1,69 @@
import cv2
import mediapipe as mp
import numpy as np

def calculate_angle(joint):
    v1 = joint[[0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19], :]
    v2 = joint[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], :]
    v = v2 - v1
    # Normalize v
    v = v / np.linalg.norm(v, axis=1)[:, np.newaxis]

    # Angle between adjacent bone vectors via the arccosine of their dot product
    compareV1 = v[[0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 16, 17], :]
    compareV2 = v[[1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19], :]
    angle = np.arccos(np.einsum('nt,nt->n', compareV1, compareV2))
    angle = np.degrees(angle)  # convert radians to degrees

    return angle

cap = cv2.VideoCapture(0)

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands

with mp_hands.Hands(
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5) as hands:

    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            continue

        image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
        image.flags.writeable = False
        results = hands.process(image)

        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        if results.multi_hand_landmarks:
            for res in results.multi_hand_landmarks:
                joint = np.zeros((21, 3))
                for j, lm in enumerate(res.landmark):
                    joint[j] = [lm.x, lm.y, lm.z]

                angle = calculate_angle(joint)

                mp_drawing.draw_landmarks(
                    image, res, mp_hands.HAND_CONNECTIONS)

        cv2.imshow('MediaPipe Hands', image)

        key = cv2.waitKey(1)
        if key == ord('.'):
            if results.multi_hand_landmarks:
                # Prompt for a label
                label = input("Enter a label (e.g. open_hand, closed_fist, peace_sign): ")

                # Append the label and angle values to a text file
                with open('hand_data_with_labels.txt', 'a') as file:
                    file.write(f"{','.join(str(a) for a in angle)},{label}\n")

        elif key == 27:  # exit the loop on ESC
            break

cap.release()
cv2.destroyAllWindows()
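
Each line that DataCollect.py appends to hand_data_with_labels.txt holds the 15 joint angles followed by the text label. A minimal sketch for reading such a file back, assuming only that layout (the file name comes from the script above; the parsing code itself is illustrative):

# Illustrative sketch (assumption): parse hand_data_with_labels.txt back into
# an (N, 15) float array of angles plus a list of string labels.
import numpy as np

angles, labels = [], []
with open('hand_data_with_labels.txt') as f:
    for line in f:
        parts = line.strip().split(',')
        angles.append([float(x) for x in parts[:15]])  # 15 angles per sample
        labels.append(parts[15])                       # trailing label string
angles = np.array(angles, dtype=np.float32)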
1,295 changes: 1,295 additions & 0 deletions ML/dataSet.txt

Large diffs are not rendered by default.

104 changes: 104 additions & 0 deletions ML/웹캠으로 출력.py
@@ -0,0 +1,104 @@
import cv2
import mediapipe as mp
import numpy as np
import json
import keyboard
import time

max_num_hands = 1

gesture = {
    0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h',
    8: 'i', 9: 'j', 10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o',
    15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't', 20: 'u', 21: 'v',
    22: 'w', 23: 'x', 24: 'y', 25: 'z', 26: 'spacing', 27: 'backspace', 28: '1',
    29: '2', 30: '3', 31: '4', 32: '5', 33: '6', 34: '7', 35: '8', 36: '9', 37: '0'
}

mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
hands = mp_hands.Hands(
    max_num_hands=max_num_hands,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5
)

file = np.genfromtxt('dataSet.txt', delimiter=',')
angleFile = file[:, :-1]
labelFile = file[:, -1]
angle = angleFile.astype(np.float32)
label = labelFile.astype(np.float32)

knn = cv2.ml.KNearest_create()
knn.train(angle, cv2.ml.ROW_SAMPLE, label)
cap = cv2.VideoCapture(0)

startTime = time.time()
prev_index = 0
sentence = ''
recognizeDelay = 1

output_data = {'gestures': [], 'text': ''}

while True:
    ret, img = cap.read()
    if not ret:
        continue
    img = cv2.flip(img, 1)  # mirror the frame so it displays in its natural orientation

    # MediaPipe expects RGB input, while OpenCV captures BGR
    result = hands.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    if result.multi_hand_landmarks is not None:
        for res in result.multi_hand_landmarks:
            joint = np.zeros((21, 3))
            for j, lm in enumerate(res.landmark):
                joint[j] = [lm.x, lm.y, lm.z]

            v1 = joint[[0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 0, 13, 14, 15, 0, 17, 18, 19], :]
            v2 = joint[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], :]
            v = v2 - v1
            v = v / np.linalg.norm(v, axis=1)[:, np.newaxis]

            compareV1 = v[[0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 16, 17], :]
            compareV2 = v[[1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19], :]
            angle = np.arccos(np.einsum('nt,nt->n', compareV1, compareV2))
            angle = np.degrees(angle)

            data = np.array([angle], dtype=np.float32)
            ret, results, neighbours, dist = knn.findNearest(data, 3)
            idx = int(results[0][0])

            if idx in gesture.keys():
                if idx != prev_index:
                    startTime = time.time()
                    prev_index = idx
                else:
                    if time.time() - startTime > recognizeDelay:
                        if idx == 26:
                            sentence += ' '
                        elif idx == 27:
                            if len(sentence) > 0:
                                sentence = sentence[:-1]  # remove the last character
                        else:
                            sentence += gesture[idx]
                        startTime = time.time()

                cv2.putText(img, gesture[idx].upper(),
                            (int(res.landmark[0].x * img.shape[1] - 10),
                             int(res.landmark[0].y * img.shape[0] + 40)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3)

            mp_drawing.draw_landmarks(img, res, mp_hands.HAND_CONNECTIONS)
    cv2.putText(img, sentence, (20, 440), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)

    cv2.imshow('HandTracking', img)

    key = cv2.waitKey(1)
    if key == ord('.'):
        output_data['text'] = sentence
        json_filename = 'output_data.json'
        with open(json_filename, 'w') as json_file:
            json.dump(output_data, json_file, indent=4)
        break

cap.release()
cv2.destroyAllWindows()
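
Given the keys used above, output_data.json ends up holding the gestures list (left empty by this script) and the recognized sentence under text. A minimal sketch for consuming that file elsewhere, assuming the same file and key names:

# Illustrative sketch (assumption): read the sentence saved by the webcam script.
import json

with open('output_data.json') as f:
    data = json.load(f)

print(data['text'])      # recognized sentence
print(data['gestures'])  # empty list as written by the script above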
File renamed without changes.
4 changes: 2 additions & 2 deletions front/src/components/Footer/Footer.tsx
@@ -9,8 +9,8 @@ const Footer = () => {

   return (
     <div
-      className={`bg-[#23A352] w-full left-0 ${
-        path === "/" ? "fixed bottom-[0px]" : "bottom-0"
+      className={`bg-[#23A352] w-full ${
+        path === "/" ? "fixed bottom-[0px]" : ""
       } h-[130px] flex flex-row items-center justify-center`}
     >
       <img src={logo} alt="logo" className="w-auto h-[130px]" />
2 changes: 1 addition & 1 deletion front/src/pages/PlugIn/PlugIn.tsx
@@ -9,7 +9,7 @@ const Plugin = () => {
   const isLogin = useRecoilValue(authState);

   return (
-    <div className="min-w-[1366px] h-[100vh] flex flex-col">
+    <div className="w-full h-[100vh] flex flex-col min-h-full">
       <Header />
       {isLogin ? <UsrOnline /> : <UsrOffline />}
       <Footer />
10 changes: 5 additions & 5 deletions front/src/pages/PlugIn/UsrOnline.tsx
@@ -16,13 +16,13 @@ const UsrOnline = () => {
   };

   return (
-    <div className="min-w-[1366px] h-[100vh] flex flex-row w-full">
+    <div className="min-w-[1366px] h-[1300px] flex flex-row w-full">
       {openModal && <Modal onOpenModal={onModalAlert} />}
-      <div className="w-full h-full flex items-start justify-start mt-[100px] flex-col">
-        <p className="text-5xl font-bold text-black font-main ml-[118px]">
+      <div className="w-full h-auto flex items-start justify-start mt-[100px] flex-col mb-[100px]">
+        <p className="h-auto text-5xl font-bold text-black font-main ml-[118px]">
           플러그인
         </p>
-        <div className="flex flex-row items-center justify-center w-full md:scale-75 xl:scale-[85%] 2xl:scale-90 3xl:scale-100">
+        <div className="flex flex-row h-[320px] items-center justify-center w-full md:scale-75 xl:scale-[85%] 2xl:scale-90 3xl:scale-100">
           <div className="w-[301px] h-[320px] mt-[50px] border border-[#5865F2] flex flex-col items-center justify-center rounded-xl ">
             <img
               src={discord_blue}
@@ -79,7 +79,7 @@ const UsrOnline = () => {
             </button>
           </div>
         </div>
-        <div className="flex flex-row items-center justify-center w-full md:scale-75 xl:scale-[85%] 2xl:scale-90 3xl:scale-100">
+        <div className="flex flex-row h-[320px] items-center justify-center w-full md:scale-75 xl:scale-[85%] 2xl:scale-90 3xl:scale-100">
           <div className="w-[301px] h-[320px] mt-[50px] border border-[#E83B6F] flex flex-col items-center justify-center rounded-xl ">
             <img
               src={instagram}
