Skip to content

Commit

Permalink
Updated ML part.
Browse files Browse the repository at this point in the history
  • Loading branch information
michaelbeutler committed Oct 23, 2022
1 parent 016a188 commit c551f80
Show file tree
Hide file tree
Showing 5 changed files with 786 additions and 41 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -34,3 +34,5 @@ yarn-error.log*
# typescript
*.tsbuildinfo
next-env.d.ts
.env
credentials.json
31 changes: 11 additions & 20 deletions components/IngredientsRecognitionModal.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,9 @@ import { Button, Image, Modal, Text } from "@nextui-org/react";
import React, { useState } from "react";
import Webcam from "react-webcam";

import "@tensorflow/tfjs-backend-cpu";
import "@tensorflow/tfjs-backend-webgl";
import { Ingredient } from "../models/ingredient";
import IngredientCard from "./IngredientCard";

const cocoSsd = require("@tensorflow-models/coco-ssd");

const videoConstraints = {
width: 1280,
height: 720,
Expand All @@ -20,27 +16,22 @@ const WebcamCapture: React.FC<{ ingredients: Ingredient[] }> = ({
}) => {
const [image, setImage] = useState<string>("");
const [predictions, setPredictions] = useState<
{ class: string; score: number }[]
{ name: string; score: number }[]
>([]);

const predict = async () => {
const img = document.getElementById("testImage") as HTMLImageElement;

// Load the model.
const model = await cocoSsd.load();

// Classify the image.
const predictions = await model.detect(img);
console.log(predictions);
setPredictions(predictions);
const predict = async (imageSrc: string) => {
fetch("/api/predict", {
method: "POST",
body: JSON.stringify({ image: imageSrc }),
}).then(res => res.json()).then((data) => setPredictions(data));
};

const webcamRef = React.useRef<Webcam>(null);
const capture = React.useCallback(async () => {
if (webcamRef.current && webcamRef.current !== null) {
const imageSrc = webcamRef.current.getScreenshot();
setImage(imageSrc as string);
predict();
predict(imageSrc as string);
}
}, [webcamRef]);
return (
Expand All @@ -56,15 +47,15 @@ const WebcamCapture: React.FC<{ ingredients: Ingredient[] }> = ({
<Button onClick={capture}>Aufnahme</Button>
<ul>
{predictions.map((prediction) => (
<li key={prediction.class}>
{prediction.class} - {Math.round(prediction.score * 100)}%
<li key={prediction.name}>
{prediction.name} - {Math.round(prediction.score * 100)}%
</li>
))}
</ul>
{ingredients
.filter((i) =>
predictions
.map((p) => p.class.toLowerCase())
.map((p) => p.name.toLowerCase())
.includes(i.name.toLowerCase())
)
.map((ingredient) => (
Expand Down Expand Up @@ -114,7 +105,7 @@ const IngredientsRecognitionModal: React.FC<
<WebcamCapture ingredients={ingredients} />
</Modal.Body>
<Modal.Footer>
<Button flat auto color="error" onClick={() => setVisible(false)}>
<Button flat auto color="error" onPress={() => setVisible(false)}>
Schliessen
</Button>
</Modal.Footer>
Expand Down
2 changes: 2 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@
"lint": "next lint"
},
"dependencies": {
"@google-cloud/vision": "^3.0.1",
"@grpc/grpc-js": "^1.7.3",
"@nextui-org/react": "^1.0.0-beta.10",
"@tensorflow-models/coco-ssd": "^2.2.2",
"@tensorflow-models/mobilenet": "^2.1.0",
Expand Down
49 changes: 49 additions & 0 deletions pages/api/predict.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from "next";

import vision from "@google-cloud/vision";

// Google Cloud Vision client (credentials come from the standard
// GOOGLE_APPLICATION_CREDENTIALS environment lookup — TODO confirm).
// The typed client already exposes `objectLocalization`, so the previous
// `: any` annotation only discarded the generated types.
const client = new vision.ImageAnnotatorClient();

/**
 * Decodes a base64 data URL (e.g. "data:image/jpeg;base64,...") into a Blob.
 *
 * @param dataurl - Data URL of the form "data:<mime>;base64,<payload>".
 * @returns Blob containing the decoded bytes, tagged with the parsed MIME type.
 * @throws TypeError when the header lacks a ":<mime>;" section (the original
 *         regex-based parsing failed the same way, just with a cryptic message).
 */
const dataURLtoBlob = (dataurl: string) => {
  const [header, payload] = dataurl.split(",");
  // MIME type sits between ":" and ";" in the header, e.g. "image/jpeg".
  const match = header.match(/:(.*?);/);
  if (!match) {
    throw new TypeError("dataURLtoBlob: malformed data URL header");
  }
  const mime = match[1];
  // Buffer decodes base64 natively — no manual atob + charCodeAt loop needed.
  const bytes = Buffer.from(payload, "base64");
  return new Blob([bytes], { type: mime });
};

/**
 * POST /api/predict — runs Google Cloud Vision object localization on a
 * base64 data-URL image captured by the webcam client.
 *
 * Request body: a JSON string `{ "image": "<data URL>" }` (the client sends
 * a raw stringified body, so it is parsed manually here).
 * Response: the `localizedObjectAnnotations` array (`{ name, score, ... }[]`),
 * or the raw Vision result when no annotations are present — same contract
 * as before, but malformed input now yields 400 and API failures 500 instead
 * of an unhandled promise rejection.
 */
export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse
) {
  // Only POST carries an image payload; reject anything else up front.
  if (req.method !== "POST") {
    res.status(405).json({ error: "Method not allowed" });
    return;
  }

  let image: string;
  try {
    image = JSON.parse(req.body).image as string;
    if (typeof image !== "string") {
      throw new Error("missing 'image' field");
    }
  } catch {
    res
      .status(400)
      .json({ error: "Body must be JSON with an 'image' data URL" });
    return;
  }

  try {
    const request = {
      image: {
        // Vision expects raw base64 without the "data:...;base64," prefix;
        // round-trip through the Blob decoder to strip it.
        content: Buffer.from(await dataURLtoBlob(image).arrayBuffer()).toString(
          "base64"
        ),
      },
    };

    const [result] = await client.objectLocalization(request);
    const objects = result.localizedObjectAnnotations;
    // Fall back to the raw result when Vision returns no annotations,
    // preserving the original response shape.
    res.json(objects ?? result);
  } catch (e: unknown) {
    console.error("predict: Vision request failed", e);
    res.status(500).json({ error: "Prediction failed" });
  }
}

0 comments on commit c551f80

Please sign in to comment.