Skip to content

Commit 90d8c1e

Browse files
author
Ayaan Farooqui
committed
Added MediaPipe Holistic pose detection and a custom TensorFlow.js model
1 parent 63d883c commit 90d8c1e

File tree

8 files changed

+1647
-56
lines changed

8 files changed

+1647
-56
lines changed

package-lock.json

Lines changed: 1292 additions & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,18 @@
33
"version": "0.1.0",
44
"private": true,
55
"dependencies": {
6+
"@emotion/react": "^11.8.1",
7+
"@emotion/styled": "^11.8.1",
8+
"@mediapipe/camera_utils": "^0.3.1640029074",
9+
"@mediapipe/control_utils": "^0.6.1629159505",
10+
"@mediapipe/drawing_utils": "^0.3.1620248257",
11+
"@mediapipe/holistic": "^0.5.1635989137",
12+
"@mui/material": "^5.4.3",
13+
"@tensorflow/tfjs": "^3.13.0",
614
"@testing-library/jest-dom": "^5.16.2",
715
"@testing-library/react": "^12.1.3",
816
"@testing-library/user-event": "^13.5.0",
17+
"device-detector-js": "^3.0.1",
918
"react": "^17.0.2",
1019
"react-dom": "^17.0.2",
1120
"react-scripts": "5.0.0",
2.28 MB
Binary file not shown.

public/jsonmodel/model.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"format": "layers-model", "generatedBy": "keras v2.5.0", "convertedBy": "TensorFlow.js Converter v3.13.0", "modelTopology": {"keras_version": "2.5.0", "backend": "tensorflow", "model_config": {"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 30, 1662], "dtype": "float32", "sparse": false, "ragged": false, "name": "lstm_input"}}, {"class_name": "LSTM", "config": {"name": "lstm", "trainable": true, "batch_input_shape": [null, 30, 1662], "dtype": "float32", "return_sequences": true, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 64, "activation": "relu", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}}, {"class_name": "LSTM", "config": {"name": "lstm_1", "trainable": true, "dtype": "float32", "return_sequences": true, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 128, "activation": "relu", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": 
null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}}, {"class_name": "LSTM", "config": {"name": "lstm_2", "trainable": true, "dtype": "float32", "return_sequences": false, "return_state": false, "go_backwards": false, "stateful": false, "unroll": false, "time_major": false, "units": 64, "activation": "relu", "recurrent_activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "recurrent_initializer": {"class_name": "Orthogonal", "config": {"gain": 1.0, "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "unit_forget_bias": true, "kernel_regularizer": null, "recurrent_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "recurrent_constraint": null, "bias_constraint": null, "dropout": 0.0, "recurrent_dropout": 0.0, "implementation": 2}}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 64, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 32, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 2, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": 
"GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}}, "training_config": {"loss": "categorical_crossentropy", "metrics": [[{"class_name": "MeanMetricWrapper", "config": {"name": "categorical_accuracy", "dtype": "float32", "fn": "categorical_accuracy"}}]], "weighted_metrics": null, "loss_weights": null, "optimizer_config": {"class_name": "Adam", "config": {"name": "Adam", "learning_rate": 0.001, "decay": 0.0, "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-07, "amsgrad": false}}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "dense/kernel", "shape": [64, 64], "dtype": "float32"}, {"name": "dense/bias", "shape": [64], "dtype": "float32"}, {"name": "dense_1/kernel", "shape": [64, 32], "dtype": "float32"}, {"name": "dense_1/bias", "shape": [32], "dtype": "float32"}, {"name": "dense_2/kernel", "shape": [32, 2], "dtype": "float32"}, {"name": "dense_2/bias", "shape": [2], "dtype": "float32"}, {"name": "lstm/lstm_cell/kernel", "shape": [1662, 256], "dtype": "float32"}, {"name": "lstm/lstm_cell/recurrent_kernel", "shape": [64, 256], "dtype": "float32"}, {"name": "lstm/lstm_cell/bias", "shape": [256], "dtype": "float32"}, {"name": "lstm_1/lstm_cell_1/kernel", "shape": [64, 512], "dtype": "float32"}, {"name": "lstm_1/lstm_cell_1/recurrent_kernel", "shape": [128, 512], "dtype": "float32"}, {"name": "lstm_1/lstm_cell_1/bias", "shape": [512], "dtype": "float32"}, {"name": "lstm_2/lstm_cell_2/kernel", "shape": [128, 256], "dtype": "float32"}, {"name": "lstm_2/lstm_cell_2/recurrent_kernel", "shape": [64, 256], "dtype": "float32"}, {"name": "lstm_2/lstm_cell_2/bias", "shape": [256], "dtype": "float32"}]}]}

src/App.css

Lines changed: 0 additions & 38 deletions
This file was deleted.

src/App.js

Lines changed: 99 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,107 @@
1-
import logo from './logo.svg';
2-
import './App.css';
1+
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
2+
import * as mpHolistic from "@mediapipe/holistic";
3+
import * as tf from '@tensorflow/tfjs';
4+
import { CircularProgress } from "@mui/material";
5+
import { Camera } from '@mediapipe/camera_utils';
36

47
function App() {
8+
9+
const [showLoadingSpinner, setShowLoadingSpinner] = useState(false)
10+
const canvasElementRef = useRef();
11+
const videoElementRef = useRef();
12+
const controlsRef = useRef();
13+
const [canvasCtx, setCanvasCtx] = useState(null);
14+
const [model, setModel] = useState();
15+
16+
const onResults = useCallback((results) => {
17+
try {
18+
let pose = tf.zeros([33, 4]), face = tf.zeros([468, 3]), lh = tf.zeros([21, 3]), rh = tf.zeros([21, 3]);
19+
if (results.poseLandmarks) {
20+
for (let res of results.poseLandmarks)
21+
pose = tf.reshape(tf.tensor2d([[res.x, res.y, res.z, res.visibility]]), [-1]);
22+
}
23+
if (results.faceLandmarks) {
24+
for (let res of results.faceLandmarks)
25+
face = tf.reshape(tf.tensor2d([[res.x, res.y, res.z, res.visibility]]), [-1]);
26+
}
27+
if (results.leftHandLandmarks) {
28+
for (let res of results.leftHandLandmarks)
29+
lh = tf.reshape(tf.tensor2d([[res.x, res.y, res.z, res.visibility]]).flatten(), [-1]);
30+
}
31+
if (results.rightHandLandmarks) {
32+
for (let res of results.rightHandLandmarks)
33+
rh = tf.reshape(tf.tensor2d([[res.x, res.y, res.z, res.visibility]]).flatten(), [-1]);
34+
}
35+
console.log("pose1", pose.shape, face.shape, lh.shape, rh.shape);
36+
}
37+
catch (err) {
38+
console.log(err)
39+
}
40+
}, [])
41+
42+
useEffect(() => {
43+
if (typeof canvasElementRef.current !== typeof undefined && typeof canvasElementRef.current !== typeof undefined && typeof controlsRef.current !== typeof undefined) {
44+
console.log("activating")
45+
// Set the canvas context
46+
setCanvasCtx(canvasElementRef.current.getContext('2d'))
47+
// ------------------------------
48+
}
49+
}, [canvasElementRef, videoElementRef, controlsRef])
50+
51+
useEffect(() => {
52+
if (canvasCtx !== null) {
53+
tf.loadLayersModel('jsonmodel/model.json')
54+
.then(model => {
55+
setModel(model)
56+
})
57+
.catch(err => {
58+
console.log(err)
59+
})
60+
const holistic = new mpHolistic.Holistic({
61+
locateFile: (file) => {
62+
return `https://cdn.jsdelivr.net/npm/@mediapipe/holistic@` +
63+
`${mpHolistic.VERSION}/${file}`;
64+
}
65+
})
66+
holistic.setOptions({ minDetectionConfidence: 0.5, minTrackingConfidence: 0.5 })
67+
// set function to run on result of holistic model
68+
holistic.onResults(onResults);
69+
// ------------------------------------------------
70+
// Start the camera using mediapipe camera utility
71+
if (typeof videoElementRef.current !== "undefined" && videoElementRef.current !== null) {
72+
const camera = new Camera(videoElementRef.current, {
73+
onFrame: async () => {
74+
await holistic.send({ image: videoElementRef.current });
75+
},
76+
width: 480,
77+
height: 480
78+
});
79+
camera.start();
80+
}
81+
// --------------------------------------------------
82+
}
83+
}, [canvasCtx, onResults])
84+
585
return (
686
<div className="App">
7-
<header className="App-header">
8-
<img src={logo} className="App-logo" alt="logo" />
9-
<p>
10-
Edit <code>src/App.js</code> and save to reload.
11-
</p>
12-
<a
13-
className="App-link"
14-
href="https://reactjs.org"
15-
target="_blank"
16-
rel="noopener noreferrer"
17-
>
18-
Learn React
19-
</a>
20-
</header>
87+
<div className="container">
88+
<p >Webcam Input</p>
89+
<video ref={videoElementRef} ></video>
90+
91+
<canvas ref={canvasElementRef} width="480px" height="480px">
92+
93+
</canvas>
94+
<div className="loading">
95+
{
96+
showLoadingSpinner && <CircularProgress />
97+
}
98+
</div>
99+
<div ref={controlsRef} >
100+
</div>
101+
</div>
102+
21103
</div>
22-
);
104+
)
23105
}
24106

25107
export default App;

0 commit comments

Comments
 (0)