-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy path: helperFunctions.js
72 lines (69 loc) · 2.05 KB
/
helperFunctions.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import * as tf from "@tensorflow/tfjs";
let sequence = [];
let sentence = [];
let prevWord = "";
const actions = ["Hello", "Thanks", "Goodbye", "Please", "Yes", "No"];
export const onResults = (
results,
model,
speechSynthesisUtterance,
textAreaRef
) => {
if (model !== null) {
try {
let pose = new Array(33 * 4).fill(0),
face = new Array(468 * 3).fill(0),
lh = new Array(21 * 3).fill(0),
rh = new Array(21 * 3).fill(0);
console.log("getting frame");
if (results.poseLandmarks) {
let arr = [];
for (let res of results.poseLandmarks) {
arr.push(...[res.x, res.y, res.z, res.visibility]);
}
pose = arr;
}
if (results.faceLandmarks) {
let arr = [];
for (let res of results.faceLandmarks) {
arr.push(...[res.x, res.y, res.z]);
}
face = arr;
}
if (results.leftHandLandmarks) {
let arr = [];
for (let res of results.leftHandLandmarks) {
arr.push(...[res.x, res.y, res.z]);
}
lh = arr;
}
if (results.rightHandLandmarks) {
let arr = [];
for (let res of results.rightHandLandmarks) {
arr.push(...[res.x, res.y, res.z]);
}
rh = arr;
}
sequence.push([...pose, ...face, ...lh, ...rh]);
if (sequence.length === 20) {
let new_tensor = tf.tensor2d(sequence);
let model_result = model.predict(tf.expandDims(new_tensor, 0));
model_result.array().then((res) => {
let prediction = actions[res[0].indexOf(Math.max(...res[0]))];
if (prediction !== prevWord) {
speechSynthesisUtterance.text = prediction;
window.speechSynthesis.speak(speechSynthesisUtterance);
textAreaRef.current.innerText = prediction;
sentence.push(prediction);
}
prevWord = prediction;
});
sequence = [];
}
} catch (err) {
sequence = [];
console.log(err);
}
}
};
export const resetSentence = () => (sentence = []);