Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Error at faceapi.drawDetection(canvas, boxesWithText). #130

Closed
techravish opened this issue Nov 8, 2018 · 11 comments
Closed

Error at faceapi.drawDetection(canvas, boxesWithText). #130

techravish opened this issue Nov 8, 2018 · 11 comments

Comments

@techravish
Copy link

I am trying to implement the use case list on below site

https://itnext.io/realtime-javascript-face-tracking-and-face-recognition-using-face-api-js-mtcnn-face-detector-d924dd8b5740

i am able to do face detection but getting issue in face recognition with some investigation it looks like boxesWithText is giving empty output which is causing drawDetection to fail.

Error

Uncaught (in promise) TypeError: Cannot read property ‘x’ of undefined
at VM1409 face-api.min.js:1
at Array.forEach ()
at Object.t.drawDetection (VM1409 face-api.min.js:1)
at run2 (VM1410 index.js:75)

JS code

 $(document).ready(function() {
     
  run1()
})

async function run1() {

    const MODELS = "http://localhost:8000/Desktop/FaceID/Face%20Detection%20with%20webcam/models"; // Contains all the weights.

    await faceapi.loadSsdMobilenetv1Model(MODELS)
    await faceapi.loadFaceLandmarkModel(MODELS)
    await faceapi.loadFaceRecognitionModel(MODELS)
    

// try to access users webcam and stream the images
  // to the video element
 const videoEl = document.getElementById('inputVideo')
  navigator.getUserMedia(
    { video: {} },
    stream => videoEl.srcObject = stream,
    err => console.error(err)
)
}

async function run2() {
    
const mtcnnResults = await faceapi.ssdMobilenetv1(document.getElementById('inputVideo'))

overlay.width = 500
overlay.height = 400
const detectionsForSize = mtcnnResults.map(det => det.forSize(500, 400))

faceapi.drawDetection(overlay, detectionsForSize, { withScore: true })    


const input = document.getElementById('inputVideo')
const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()
    
    
const labels = ['sheldon','ravish']

const labeledFaceDescriptors = await Promise.all(
  labels.map(async label => {
    // fetch image data from urls and convert blob to HTMLImage element
    const imgUrl = `http://localhost:8000/Desktop/${label}.png`
    const img = await faceapi.fetchImage(imgUrl)
    
    // detect the face with the highest score in the image and compute it's landmarks and face descriptor
    const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
    
    if (!fullFaceDescription) {
      throw new Error(`no faces detected for ${label}`)
    }
    
    const faceDescriptors = [fullFaceDescription.descriptor]
   // console.log(label)
    return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
  })
)

// 0.6 is a good distance threshold value to judge
// whether the descriptors match or not
const maxDescriptorDistance = 0.6
const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
 //console.log("face matcher"+faceMatcher)
const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))


const boxesWithText = results.map((bestMatch, i) => {
  const box = fullFaceDescriptions[i].detection.box
  const text = bestMatch.toString()
  const boxWithText = new faceapi.BoxWithText(box, text)
})

faceapi.drawDetection(overlay, boxesWithText)


}

async function onPlay(videoEl) {
    run2()
    setTimeout(() => onPlay(videoEl))
} 
@justadudewhohacks
Copy link
Owner

justadudewhohacks commented Nov 12, 2018

You forgot the return statement in your map function:

const boxesWithText = results.map((bestMatch, i) => {
  const box = fullFaceDescriptions[i].detection.box
  const text = bestMatch.toString()
  const boxWithText = new faceapi.BoxWithText(box, text)
  return boxWithText
})

Fixed the gist...

@AndreasRef
Copy link

Hi, I am attempting to get this example from the blogpost at itnext.io running as well, but have a few issues. Does anyone have the full source code (HTML + JS) for this, so I can see what I am missing?

@logiticks-github
Copy link

logiticks-github commented Jun 28, 2019

const video = document.getElementById('video')

Promise.all([
faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
faceapi.nets.faceExpressionNet.loadFromUri('/models'),
faceapi.nets.ssdMobilenetv1.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
navigator.getUserMedia(
{ video: {} },
stream => video.srcObject = stream,
err => console.error(err)
)
}

video.addEventListener('play', async () => {
const canvas = faceapi.createCanvasFromMedia(video)
document.body.append(canvas)
const labeledFaceDescriptors = await loadLabeledImages()
const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, 0.6)
const displaySize = { width: video.width, height: video.height }
faceapi.matchDimensions(canvas, displaySize)
setInterval(async () => {
const detections = await faceapi.detectAllFaces(video).withFaceLandmarks().withFaceDescriptors();
const resizedDetections = faceapi.resizeResults(detections, displaySize)
canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
if(resizedDetections.length>0){
const results = resizedDetections.map(d => faceMatcher.findBestMatch(d.descriptor))
results.forEach((result, i) => {
const box = resizedDetections[i].detection.box
const drawBox = new faceapi.draw.DrawBox(box, { label: result.label.toString() })
drawBox.draw(canvas)
})
}

// faceapi.draw.drawDetections(canvas, resizedDetections)
// faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
// faceapi.draw.drawFaceExpressions(canvas, resizedDetections)

}, 100)
})

function loadLabeledImages() {
const labels = ['Black Widow', 'Captain America', 'Captain Marvel', 'Hawkeye', 'Jim Rhodes', 'Thor', 'Tony Stark']
return Promise.all(
labels.map(async label => {
const descriptions = []
for (let i = 1; i <= 1; i++) {
const img = await faceapi.fetchImage(images/${label}.png)
const detections = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
descriptions.push(detections.descriptor)
debugger;
}
return new faceapi.LabeledFaceDescriptors(label, descriptions)
})
)
}

@thirukumars
Copy link

while running my code,this error shows,
2141script.js:32 Uncaught (in promise) TypeError: faceapi.draw.AgeAndGender is not a function
at script.js:32

please help me to solve this

@justadudewhohacks
Copy link
Owner

There is no faceapi.draw.AgeAndGender. Try this.

@thirukumars
Copy link

yeah bro its working.......
please tell where to i learn and understand the complete face-api,
please suggest to work well with face-api

@thirukumars
Copy link

script.js:66 Uncaught (in promise) ReferenceError: getFaceDetectorOptions is not defined
at updateReferenceImageResults (script.js:66)
at uploadImage (script.js:50)
this is my error

@thirukumars
Copy link

//const video = document.getElementById('video')
const input = document.getElementById('myImg')

Promise.all([
faceapi.nets.ssdMobilenetv1.loadFromUri('/models'),
faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
faceapi.nets.faceExpressionNet.loadFromUri('/models'),
faceapi.nets.ageGenderNet.loadFromUri('/models')

])//.then(startVideo)

// function startVideo() {
// navigator.getUserMedia(
// { video: {} },
// stream => video.srcObject = stream,
// err => console.error(err)
// )
// }

// .then(load)
// async function load(){
// const imgFile = document.getElementById('myFileUpload').files[0]
// // create an HTMLImageElement from a Blob
// const img = await faceapi.bufferToImage(imgFile)
// document.getElementById('myImg').src = img.src
// const canvas = faceapi.createCanvasFromMedia(document.getElementById('myImg').src)
// const displaySize = { width: input.width, height: input.height }
// faceapi.matchDimensions(canvas, displaySize)

// }
// async function uploadImage() {

// const detections1 = await faceapi.detectSingleFace(input).withFaceLandmarks()
// const resizedResults = faceapi.resizeResults(detections1, displaySize)
// canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
// const detectionsWithLandmarks = await faceapi.detectSingleFace(input).withFaceLandmarks()
// faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
// console.log(detections1)
// }
let faceMatcher=null;
async function uploadImage(e) {
const imgFile = document.getElementById('myFileUpload').files[0]
// create an HTMLImageElement from a Blob
const img = await faceapi.bufferToImage(imgFile)
document.getElementById('myImg').src = img.src
updateReferenceImageResults()
}

async function uploadQueryImage(e) {
const imgFile = document.getElementById('queryImgUploadInput').files[0]
// create an HTMLImageElement from a Blob
const img = await faceapi.bufferToImage(imgFile)
document.getElementById('queryImg').src = img.src
updateQueryImageResults()
}

async function updateReferenceImageResults() {
const inputImgEl = document.getElementById('#myImg')
const canvas = document.getElementById('#refImgOverlay')

const fullFaceDescriptions = await faceapi
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()

if (!fullFaceDescriptions.length) {
return
}

// create FaceMatcher with automatically assigned labels
// from the detection results for the reference image
faceMatcher = new faceapi.FaceMatcher(fullFaceDescriptions)

faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
const resizedResults = faceapi.resizeResults(fullFaceDescriptions, inputImgEl)
// draw boxes with the corresponding label as text
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}
async function updateQueryImageResults() {
if (!faceMatcher) {
return
}

const inputImgEl = document.getElementById('#queryImg')
const canvas = document.getElementById('#queryImgOverlay')

const results = await faceapi
.detectAllFaces(inputImgEl, getFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()

faceapi.matchDimensions(canvas, inputImgEl)
// resize detection and landmarks in case displayed image is smaller than
// original size
const resizedResults = faceapi.resizeResults(results, inputImgEl)

resizedResults.forEach(({ detection, descriptor }) => {
const label = faceMatcher.findBestMatch(descriptor).toString()
const options = { label }
const drawBox = new faceapi.draw.DrawBox(detection.box, options)
drawBox.draw(canvas)
})
}

// video.addEventListener('play', () => {
// const canvas = faceapi.createCanvasFromMedia(video)
// document.body.append(canvas)
// const displaySize = { width: video.width, height: video.height }
// faceapi.matchDimensions(canvas, displaySize)
// setInterval(async () => {
// const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions().withAgeAndGender()
// const resizedDetections = faceapi.resizeResults(detections, displaySize)
// canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
// faceapi.draw.drawDetections(canvas, resizedDetections)
// faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
// faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
// resizedDetections.forEach(result=>{
// const{age,gender,genderProbability}=result
// new faceapi.draw.DrawTextField([
// ${faceapi.round(age,0)} years,
// ${gender}(${faceapi.round(genderProbability)})],
// result.detection.box.bottomRight).draw(canvas)
// })
// resizedDetections.forEach(res=>{
// const{age}=res
// new faceapi.draw.DrawTextField([
// ${faceapi.round(age,0)} years],
// res.detection.box.bottomLeft).draw(canvas)
// })

// faceapi.draw.drawAgeAndGender(canvas, resizedDetections)

// }, 1000)
//})

@thirukumars
Copy link

there are many comment line so,sorry for this
please give the solution guys

@thirukumars
Copy link

You forgot the return statement in your map function:

const boxesWithText = results.map((bestMatch, i) => {
  const box = fullFaceDescriptions[i].detection.box
  const text = bestMatch.toString()
  const boxWithText = new faceapi.BoxWithText(box, text)
  return boxWithText
})

Fixed the gist..

its not the proper solution,i have the same issue
drawDetection is not a function..
please give solution its frustrating

@bakonozadze
Copy link

bakonozadze commented Jul 13, 2020

can you help my ?? thats wrong in my code ?

its can detect face, write score but without recognition >(label name)

  <script>
    let forwardTimes = []
    let withFaceLandmarks = false
    let withBoxes = true

    function onChangeWithFaceLandmarks(e) {
      withFaceLandmarks = $(e.target).prop('checked')
    }

    function onChangeHideBoundingBoxes(e) {
      withBoxes = !$(e.target).prop('checked')
    }

    function updateTimeStats(timeInMs) {
      forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
      const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
      $('#time').val(`${Math.round(avgTimeInMs)} ms`)
      $('#fps').val(`${faceapi.utils.round(1000 / avgTimeInMs)}`)
    }

    async function onPlay(videoEl) {
      if(!videoEl.currentTime || videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
        return setTimeout(() => onPlay(videoEl))

      const options = getFaceDetectorOptions()

      const ts = Date.now()

      const drawBoxes = withBoxes
      const drawLandmarks = withFaceLandmarks

      let task = faceapi.detectAllFaces(videoEl, options)
      task = withFaceLandmarks ? task.withFaceLandmarks() : task
      const results = await task

      updateTimeStats(Date.now() - ts)

      const canvas = $('#overlay').get(0)
      const dims = faceapi.matchDimensions(canvas, videoEl, true)

      const resizedResults = faceapi.resizeResults(results, dims)
      if (drawBoxes) {
        faceapi.draw.drawDetections(canvas, resizedResults)
      }
      if (drawLandmarks) {
        faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
      }

      setTimeout(() => onPlay(videoEl))
    }

    async function run() {
      // load face detection and face landmark models
      await changeFaceDetector(TINY_FACE_DETECTOR)
	      await faceapi.loadSsdMobilenetv1Model('/')
		      await faceapi.loadFaceRecognitionModel('/')
      await faceapi.loadFaceLandmarkModel('/')
      changeInputSize(416)

      // start processing frames
 

const labels = ['1','2']

const labeledFaceDescriptors = await Promise.all(
  labels.map(async label => {
    // fetch image data from urls and convert blob to HTMLImage element
    const imgUrl = `picture/${label}.JPG`
    const img = await faceapi.fetchImage(imgUrl)
    
    // detect the face with the highest score in the image and compute it's landmarks and face descriptor
    const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
    
    if (!fullFaceDescription) {
      throw new Error(`no faces detected for ${label}`)
    }
    
    const faceDescriptors = [fullFaceDescription.descriptor]
    console.log(label)
    return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
  })
)
const input = document.getElementById('inputVideo')
const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()
    
  
// 0.6 is a good distance threshold value to judge
// whether the descriptors match or not
const maxDescriptorDistance = 0.6
const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
 console.log("face matcher"+faceMatcher)
const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))

	results.forEach((bestMatch, i) => {
	const box = fullFaceDescriptions[i].detection.box
	const text = bestMatch.toString()
	const drawBox = new faceapi.draw.DrawBox(box, { label: text })

	console.log("last")
	})

//  results   


     onPlay($('#inputVideo').get(0))
}
	
    function updateResults() {}

    $(document).ready(function() {
      renderNavBar('#navbar', 'video_face_tracking')
      initFaceDetectionControls()
      run()
    })

            </script>

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

6 participants