
Commit 59713a5

updated C09 with better recognition - added face service and now draw rectangle around first face

rddill-IBM committed Feb 2, 2018
1 parent 36a5060 commit 59713a5
Showing 7 changed files with 285 additions and 100 deletions.
78 changes: 44 additions & 34 deletions Chapter09/Documentation/answers/images_complete.js
@@ -54,61 +54,71 @@ exports.upload= function(req, res, next){
exports.classify= function(req, res, next){

req.pipe(req.busboy);
req.busboy.on('file', function(fieldname, file, filename) {
req.busboy.on('file', function(fieldname, file, filename)
{
// not all file systems are friendly to names with spaces in them.
// if this name has spaces in it, replace them with an underscore.
var fileName = filename.replace(/ /g,"_");
var newFile = path.join(path.dirname(require.main.filename),'images',fileName);
var fd = fs.openSync(newFile, 'w');
var fstream = fs.createWriteStream(newFile, {fd: fd});
file.pipe(fstream);
// now that we have the image stored on the server, send it to watson
fstream.on('close', function () {
var params = {images_file: fs.createReadStream(newFile), classifier_ids: [vr_classifier] };
var visual_recognition = watson.visual_recognition({
api_key: apiKey,
version: 'v3', version_date: '2016-05-20'
});
visual_recognition.classify(params, function(err, classify_results) {
// if there is an error, log it.
// this should be extended to include a res.send() so the browser does not repeat the request
// which it will do on requests with no response
if (err) {console.log(err);}
else
// the request was successful, send the results back to the browser
{ res.send(classify_results); }
});
} );
var fileName = filename.replace(/ /g,"_");
var newFile = path.join(path.dirname(require.main.filename),'images',fileName);
var fd = fs.openSync(newFile, 'w');
var fstream = fs.createWriteStream(newFile, {fd: fd});
var _res = {};
file.pipe(fstream);
// now that we have the image stored on the server, send it to watson
fstream.on('close', function ()
{
var params = {images_file: fs.createReadStream(newFile), classifier_ids: [vr_classifier] };
var visual_recognition = watson.visual_recognition(
{
api_key: apiKey,
version: 'v3', version_date: '2016-05-20'
});
visual_recognition.detectFaces(params, function(err, faces)
{
if (err) {console.log(err); res.send({'results': 'failed', 'where': 'detectFaces', 'error': err});}
else
{
console.log('detectFaces successful: '+JSON.stringify(faces)); _res.faces = faces;
var params = {images_file: fs.createReadStream(newFile), classifier_ids: [vr_classifier] };
visual_recognition.classify(params, function(err, classify_results)
{
if (err) {console.log(err); res.send({'results': 'failed', 'where': 'classify', 'error': err});}
else
{
console.log('classify successful: '+JSON.stringify(classify_results)); _res.classify = classify_results;
res.send({'results': 'success', 'data': _res});
}
});
}
});
});
}
});
}

/**
* find looks in a collection of images to find those images which most closely match the provided image
* detect looks at an image to find the faces in it
* @param {object} req - nodejs object with the request information
* req.body holds post parameters
* req.body.collection - the id of the collection to use
* @param {object} res - nodejs response object
* @param {object} next - nodejs next object - used if this routine does not provide a response
*/
exports.find= function(req, res, next){
exports.detect= function(req, res, next){
// get rid of blanks in the file name
var imageName = req.body.image.replace(/ /g,"_");
var newFile = path.join(path.dirname(require.main.filename),'images',imageName);
// save the collection name
var collectionName = req.body.collection;
// read in the (previously transferred) file name
var params = {image_file: fs.createReadStream(newFile), collection_id: collectionName };
// set the visual_recognition parameters
var visual_recognition = watson.visual_recognition({
api_key: apiKey,
version: 'v3', version_date: '2016-05-20'
});
// request similar images
visual_recognition.findSimilar(params, function(err, similar_results) {
// on error, log the error
if (err) {console.log(err);}
// request face recognition
visual_recognition.detectFaces(params, function(err, response) {
// on error, log the error
if (err) {console.log(err); res.send({'results': 'failed', 'where': 'detectFaces', 'error': err});}
else
// send the results back to the browser
{res.send(similar_results);}
{console.log(JSON.stringify(response)); res.send({'results': 'success', 'data': response});}
});
}
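The reworked classify handler above now chains two Watson Visual Recognition calls, detectFaces first and then classify, and returns both payloads in a single response of the shape {'results': 'success', 'data': {faces, classify}}. Below is a hypothetical client-side sketch (an illustration only, not part of this commit) of posting an image to the endpoint and reading that combined shape:

// Hypothetical client-side sketch (not part of this commit): post an image to
// POST /images/classify and consume the combined {results, data: {faces, classify}} payload.
var formData = new FormData();
formData.append('file', droppedFiles[0]);        // file captured from the drop event
$.ajax({
  url: '/images/classify',
  type: 'POST',
  data: formData,
  processData: false,                            // leave the FormData body untouched
  contentType: false,                            // let the browser set the multipart boundary
  success: function (_data) {
    var imageResults = (typeof _data === 'string') ? JSON.parse(_data) : _data;
    if (imageResults.results !== 'success')
    { console.log('failed at ' + imageResults.where, imageResults.error); return; }
    console.log('faces: ' + JSON.stringify(imageResults.data.faces));       // detectFaces output
    console.log('classes: ' + JSON.stringify(imageResults.data.classify));  // classify output
  }
});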
4 changes: 2 additions & 2 deletions Chapter09/Documentation/answers/pageStyles_complete.css
@@ -6,8 +6,8 @@

.imageReceiver {
background-color: #E0F8E0;
width: 200px;
height: 200px
min-width: 200px;
min-height: 200px;
}
.dd_upload {
outline: 2px solid #298A08;
105 changes: 81 additions & 24 deletions Chapter09/Documentation/answers/z2c-image_complete.js
@@ -18,6 +18,7 @@ var b_Droppable, _url, _image, droppedFiles, $form, c_res ;

// visual recognition has an image limit of 2Mb
var maxSize = 2097152;
var _factor;

/**
* initialize the visual recognition page.
@@ -86,7 +87,9 @@ function initiateVR()
// remove drag target highlighting when the mouse leaves the drag area
_image.on('dragleave dragend drop', function() { _image.removeClass('dragover'); });
// do the following when the image is dragged in and dropped
_image.on('drop', function(e) { droppedFiles = e.originalEvent.dataTransfer.files;
_image.on('drop', function(e)
{
droppedFiles = e.originalEvent.dataTransfer.files;
console.log("dropped file name: "+droppedFiles[0].name);
// build a table to display image information
var fileSpecs = "<table width='90%'><tr><td>File Name</td><td>"+droppedFiles[0].name+"</td></tr>";
@@ -100,12 +103,37 @@
c_res.empty();
// display the table
c_res.append(fileSpecs);
// display the image
// load the image to get the original size of the inbound image.
// we need this information to correctly draw a box around the face later on.
var reader = new FileReader();
reader.onload = function(e) {
var __image = '<center><img id="fileToLoad" src="' + e.target.result + '", height=200 /></center>'
_image.empty();
_image.append(__image); }
var __image = '<img id="fileToLoad" src="' + e.target.result + '" height=200 />'
_image.empty(); _image.append(__image); _image.hide();
var _img = $("#fileToLoad");
window.setTimeout(function()
{
// the display window is 200 pixels high. calculate the multiplication factor to fit this window and save it for later use
_factor = 200/_img[0].naturalHeight;
// calculate the target width
var _width = _factor*_img[0].naturalWidth;
var _height = 200;
// create a drawing canvas and center it in the imageReceiver div
__image = '<center><canvas id="fileCanvas" width="'+_width+'" height="'+_height+'"></canvas></center>'
// empty the div of the image we just loaded and append this canvas to the now empty div
_image.empty(); _image.append(__image);
// get the drawing context for the canvas
var ctx = $("#fileCanvas")[0].getContext("2d");
// create a drawable image
var imageObj = new Image();
// link the source to the image dropped on the imageReceiver div
imageObj.src = e.target.result;
// when the image has loaded into memory, draw it
imageObj.onload = function () {
ctx.drawImage(imageObj, 0, 0, _width, _height);
}
_image.show();
}, 100);
}
reader.readAsDataURL(droppedFiles[0]);
});
// update the image area css
@@ -122,29 +150,58 @@
*/
function displayImageClassificationResults(_target, _data)
{
// empty the html target area
_target.empty();
console.log("displayImageClassificationResults entered with: "+_data);
// turn the returned string back into a JSON object
var imageResults = JSON.parse(_data);
console.log("displayImageClassificationResults parsed results: ",imageResults);
// empty the html target area
_target.empty();
// turn the returned string back into a JSON object
var imageResults = JSON.parse(_data);
// check to see if the requests for face detection and classification were both successful
if (imageResults.results !== 'success')
{
_target.append('Visual Recognition failed at: '+imageResults.where+' with error: <br/>'+imageResults.error.message);
return;
}
// check to make sure that there was at least one face in the provided image
if (imageResults.data.faces.images[0].faces.length === 0)
{
_target.append('There is no face in the provided image: '+imageResults.data.faces.images[0].image);
return;
}
else
{
// get the rectangle of the first face found in the image
var _imgRect = imageResults.data.faces.images[0].faces[0].face_location;
// the top value provided is consistently about 1.3 times the actual vertical offset of the face
// the following correction factor is to address that bug
var vert_correction = 0.3;
// get the drawing context of the canvas
var ctx = $("#fileCanvas")[0].getContext("2d");
// create a rectangle to fit around the face
ctx.rect(_factor*_imgRect.left, vert_correction*_factor*_imgRect.top, _factor*_imgRect.width, _factor*_imgRect.height);
// set the line width to 6 pixels
ctx.lineWidth="6";
// set the line color to blue
ctx.strokeStyle="blue";
// draw the rectangle.
ctx.stroke();

// create a display table
var _tbl = "<table width=90%><tr><th>Image Class</th><th>Probability</th><tr>";
}

var _image = imageResults.images[0].image;
// iterate through the classification results, displaying one table row for each result row
if (imageResults.images[0].classifiers.length === 0)
{ _tbl += "<tr><td>No Results with higher than 50% probability</td></tr>"}
else
{
for (each in imageResults.images[0].classifiers[0].classes)
// create a display table
var _tbl = "<table width=90%><tr><th>Image Class</th><th>Probability</th><tr>";

var _image = imageResults.data.classify.images[0].image;
// iterate through the classification results, displaying one table row for each result row
if (imageResults.data.classify.images[0].classifiers.length === 0)
{ _tbl += "<tr><td>No Results with higher than 50% probability</td></tr>"}
else
{
(function (_idx, _obj) {
_tbl += '<tr class="showFocus"><td class="col-md-6"><b>'+_obj[_idx].class+'</b></td><td>'+_obj[_idx].score+'</td></tr>';
})(each, imageResults.images[0].classifiers[0].classes)
for (each in imageResults.data.classify.images[0].classifiers[0].classes)
{
(function (_idx, _obj) {
_tbl += '<tr class="showFocus"><td class="col-md-6"><b>'+_obj[_idx].class+'</b></td><td>'+_obj[_idx].score+'</td></tr>';
})(each, imageResults.data.classify.images[0].classifiers[0].classes)
}
}
}
// close the table
_tbl += "</table>";
// and append it to the target.
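The rectangle drawn in displayImageClassificationResults relies on _factor, computed when the image is dropped, to map Watson's face_location (reported in pixels of the original image) onto the 200-pixel-high canvas. A small worked sketch of that conversion, using made-up image and face dimensions:

// Illustrative numbers only (assumptions, not taken from the commit).
// Watson reports face_location in original-image pixels; the preview canvas is 200px high.
var naturalHeight = 800, naturalWidth = 600;   // assumed size of the dropped image
var _factor = 200 / naturalHeight;             // 0.25: shrink factor used for the canvas
var face_location = { left: 120, top: 160, width: 240, height: 320 };   // assumed detectFaces output
var box = {
  left:   _factor * face_location.left,        // 30px on the canvas
  top:    _factor * face_location.top,         // 40px, before the vertical correction factor
  width:  _factor * face_location.width,       // 60px
  height: _factor * face_location.height       // 80px
};
console.log(JSON.stringify(box));              // {"left":30,"top":40,"width":60,"height":80}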
2 changes: 1 addition & 1 deletion Chapter09/controller/restapi/router.js
@@ -34,4 +34,4 @@ router.post('/auth/logout*', auth.logout);

router.post('/images/upload', images.upload);
router.post('/images/classify', images.classify);
router.post('/images/find', images.find);
router.post('/images/detect', images.detect);
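router.js swaps the old /images/find route for /images/detect, pointing at the new detect export. A minimal Express sketch of how these routes are assumed to be mounted (the surrounding app wiring and file paths are assumptions; only router.js appears in this commit):

// Hypothetical standalone sketch; paths and app setup are assumptions.
var express = require('express');
var images = require('./features/images');           // module exporting upload, classify and detect
var router = express.Router();

router.post('/images/upload', images.upload);
router.post('/images/classify', images.classify);    // now returns faces plus classification results
router.post('/images/detect', images.detect);        // replaces the removed /images/find route

var app = express();
app.use('/', router);
app.listen(6001);                                     // port number is an assumption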
