-
Notifications
You must be signed in to change notification settings - Fork 17
/
App.jsx
102 lines (88 loc) · 3.03 KB
/
App.jsx
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import React, { useState, useEffect, useRef } from "react";
import * as tf from "@tensorflow/tfjs";
import "@tensorflow/tfjs-backend-webgl"; // set backend to webgl
import Loader from "./components/loader";
import { Webcam } from "./utils/webcam";
import { renderBoxes } from "./utils/renderBox";
import { non_max_suppression } from "./utils/nonMaxSuppression";
import "./style/App.css";
/**
 * Select a subset of columns from a 2-D array (array of rows).
 * Used below to split NMS detection rows into box / score / class columns.
 * @param {Array<Array<number>>} arrayofarray rows to slice
 * @param {number[]} indexlist column indices to keep, in order
 * @returns {Array<Array<number>>} new rows containing only the requested columns
 */
function shortenedCol(arrayofarray, indexlist) {
  return arrayofarray.map((row) => indexlist.map((idx) => row[idx]));
}
/**
 * Root component: loads the YOLOv7 tf.js graph model, warms it up with a dummy
 * input, opens the webcam, and runs a per-frame detection loop that draws
 * bounding boxes onto an overlay canvas.
 */
const App = () => {
  // loading.loading: true while the model downloads; progress is the fraction (0..1).
  const [loading, setLoading] = useState({ loading: true, progress: 0 });
  const videoRef = useRef(null);  // <video> element the webcam stream is attached to
  const canvasRef = useRef(null); // overlay canvas that renderBoxes draws onto
  // NOTE(review): a new Webcam is constructed on EVERY render; if Webcam holds
  // state, this likely belongs in a useRef/useMemo — confirm against utils/webcam.
  const webcam = new Webcam();
  // configs
  const modelName = "yolov7"; // model served from /<modelName>_web_model/model.json
  const threshold = 0.80;     // minimum score passed to renderBoxes for drawing a box
  /**
   * Function to detect every frame loaded from webcam in video tag.
   * Draws the results, then schedules itself again via requestAnimationFrame,
   * so the loop runs indefinitely once started.
   * @param {tf.GraphModel} model loaded YOLOv7 tensorflow.js model
   */
  const detectFrame = async (model) => {
    const model_dim = [640, 640]; // resize target (height, width) fed to the model
    tf.engine().startScope(); // scope intermediate tensors so they get released
    const input = tf.tidy(() => {
      // Grab the current video frame, resize to 640x640, scale to [0,1],
      // move channels first (transpose HWC -> CHW), and add a batch dimension.
      const img = tf.image
        .resizeBilinear(tf.browser.fromPixels(videoRef.current), model_dim)
        .div(255.0)
        .transpose([2, 0, 1])
        .expandDims(0);
      return img
    });
    await model.executeAsync(input).then((res) => {
      res = res.arraySync()[0]; // first (only) batch element, as a plain JS array
      var detections = non_max_suppression(res);
      // Split each detection row into columns: 0-3 box coords, 4 score, 5 class.
      const boxes = shortenedCol(detections, [0,1,2,3]);
      const scores = shortenedCol(detections, [4]);
      const class_detect = shortenedCol(detections, [5]);
      renderBoxes(canvasRef, threshold, boxes, scores, class_detect);
      tf.dispose(res);
    });
    requestAnimationFrame(() => detectFrame(model)); // get another frame
    tf.engine().endScope();
  };
  useEffect(() => {
    // Load the graph model once on mount, reporting download progress to the UI.
    tf.loadGraphModel(`${window.location.origin}/${modelName}_web_model/model.json`, {
      onProgress: (fractions) => {
        setLoading({ loading: true, progress: fractions });
      },
    }).then(async (yolov7) => {
      // Warmup the model before using real data.
      const dummyInput = tf.ones(yolov7.inputs[0].shape);
      await yolov7.executeAsync(dummyInput).then((warmupResult) => {
        tf.dispose(warmupResult);
        tf.dispose(dummyInput);
        setLoading({ loading: false, progress: 1 });
        // Start the webcam; once it is open, kick off the detection loop.
        webcam.open(videoRef, () => detectFrame(yolov7));
      });
    });
    // NOTE(review): no cleanup function — the rAF loop and webcam stay live
    // if App unmounts; consider cancelling the loop and closing the webcam.
  }, []);
  // Globally silences console.warn (presumably to hide tf.js warnings).
  // NOTE(review): this runs on every render and affects the whole page — confirm intended.
  console.warn = () => {};
  return (
    <div className="App">
      <h2>Object Detection Using YOLOv7 & Tensorflow.js</h2>
      {loading.loading ? (
        <Loader>Loading model... {(loading.progress * 100).toFixed(2)}%</Loader>
      ) : (
        <p> </p>
      )}
      <div className="content">
        <video autoPlay playsInline muted ref={videoRef} id="frame"
        />
        <canvas width={640} height={640} ref={canvasRef} />
      </div>
    </div>
  );
};
export default App;