Update Speech samples. (#257)
jmdobry committed Nov 21, 2016
1 parent db26fc9 commit 9280b68
Showing 8 changed files with 113 additions and 197 deletions.
14 changes: 6 additions & 8 deletions speech/README.md
@@ -32,7 +32,7 @@ recognition technologies into developer applications.

View the [documentation][recognize_docs] or the [source code][recognize_code].

-__Usage:__ `node recognize --help`
+__Usage:__ `node recognize.js --help`

```
Commands:
@@ -42,15 +42,13 @@
listen Detects speech in a microphone input stream.
Options:
---help Show help [boolean]
+--help Show help [boolean]
Examples:
-node recognize sync ./resources/audio.raw Detects speech in "./resources/audio.raw".
-node recognize async ./resources/audio.raw Creates a job to detect speech in "./resources/audio.raw", and waits for
-the job to complete.
-node recognize stream ./resources/audio.raw Detects speech in "./resources/audio.raw" by streaming it to the Speech
-API.
-node recognize listen ./resources/audio.raw Detects speech in a microphone input stream.
+node recognize.js sync ./resources/audio.raw
+node recognize.js async ./resources/audio.raw
+node recognize.js stream ./resources/audio.raw
+node recognize.js listen
For more information, see https://cloud.google.com/speech/docs
```
6 changes: 1 addition & 5 deletions speech/package.json
@@ -5,17 +5,13 @@
"license": "Apache Version 2.0",
"author": "Google Inc.",
"scripts": {
"test": "mocha -R spec -t 10000 --require intelli-espower-loader ../test/_setup.js test/*.test.js",
"system-test": "mocha -R spec -t 10000 --require intelli-espower-loader ../system-test/_setup.js system-test/*.test.js"
"test": "cd ..; npm run st -- speech/system-test/*.test.js"
},
"dependencies": {
"@google-cloud/speech": "^0.4.0",
"node-record-lpcm16": "^0.1.4",
"yargs": "^6.4.0"
},
"devDependencies": {
"mocha": "^3.1.2"
},
"engines": {
"node": ">=4.3.2"
}
13 changes: 5 additions & 8 deletions speech/quickstart.js
@@ -37,12 +37,9 @@ const options = {
};

// Detects speech in the audio file
-speechClient.recognize(fileName, options, (err, result) => {
-if (err) {
-console.error(err);
-return;
-}

-console.log(`Transcription: ${result}`);
-});
+speechClient.recognize(fileName, options)
+.then((results) => {
+const transcription = results[0];
+console.log(`Transcription: ${transcription}`);
+});
// [END speech_quickstart]
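
For reference, a self-contained version of the new promise-based quickstart might look like the sketch below. The client setup sits outside the visible hunk, so the `projectId` value here is a placeholder, and the `.catch` handler is an illustrative addition rather than part of this commit.

```
// Sketch only: the promise-based quickstart with basic error handling added.
// Assumes @google-cloud/speech ^0.4.0; 'your-project-id' is a placeholder.
const Speech = require('@google-cloud/speech');

const speechClient = Speech({ projectId: 'your-project-id' });

const fileName = './resources/audio.raw';
const options = {
  encoding: 'LINEAR16',
  sampleRate: 16000
};

speechClient.recognize(fileName, options)
  .then((results) => {
    // With this client version the first result element is the transcription string.
    console.log(`Transcription: ${results[0]}`);
  })
  .catch((err) => {
    // The committed sample omits error handling; failures surface here instead.
    console.error('ERROR:', err);
  });
```
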
148 changes: 81 additions & 67 deletions speech/recognize.js
@@ -23,54 +23,66 @@

'use strict';

-const fs = require('fs');
-const record = require('node-record-lpcm16');
-const speech = require('@google-cloud/speech')();
+const Speech = require('@google-cloud/speech');

// [START speech_sync_recognize]
-function syncRecognize (filename, callback) {
-// Detect speech in the audio file, e.g. "./resources/audio.raw"
-speech.recognize(filename, {
+function syncRecognize (filename) {
+// Instantiates a client
+const speech = Speech();

+const config = {
+// Configure these settings based on the audio you're transcribing
encoding: 'LINEAR16',
sampleRate: 16000
-}, (err, results) => {
-if (err) {
-callback(err);
-return;
-}
+};

-console.log('Results:', results);
-callback();
-});
+// Detects speech in the audio file, e.g. "./resources/audio.raw"
+return speech.recognize(filename, config)
+.then((results) => {
+const transcription = results[0];
+console.log(`Transcription: ${transcription}`);
+return transcription;
+});
}
// [END speech_sync_recognize]

// [START speech_async_recognize]
-function asyncRecognize (filename, callback) {
-// Detect speech in the audio file, e.g. "./resources/audio.raw"
-speech.startRecognition(filename, {
+function asyncRecognize (filename) {
+// Instantiates a client
+const speech = Speech();

+const config = {
+// Configure these settings based on the audio you're transcribing
encoding: 'LINEAR16',
sampleRate: 16000
-}, (err, operation) => {
-if (err) {
-callback(err);
-return;
-}
+};

-operation
-.on('error', callback)
-.on('complete', (results) => {
-console.log('Results:', results);
-callback();
-});
-});
+// Detects speech in the audio file, e.g. "./resources/audio.raw"
+// This creates a recognition job that you can wait for now, or get its result
+// later.
+return speech.startRecognition(filename, config)
+.then((results) => {
+const operation = results[0];
+// Get a Promise representation of the final result of the job
+return operation.promise();
+})
+.then((transcription) => {
+console.log(`Transcription: ${transcription}`);
+return transcription;
+});
}
// [END speech_async_recognize]

// [START speech_streaming_recognize]
+const fs = require('fs');

function streamingRecognize (filename, callback) {
+// Instantiates a client
+const speech = Speech();

const options = {
config: {
+// Configure these settings based on the audio you're transcribing
encoding: 'LINEAR16',
sampleRate: 16000
}
@@ -90,9 +102,15 @@ function streamingRecognize (filename, callback) {
// [END speech_streaming_recognize]

// [START speech_streaming_mic_recognize]
-function streamingMicRecognize (filename) {
+const record = require('node-record-lpcm16');

+function streamingMicRecognize () {
+// Instantiates a client
+const speech = Speech();

const options = {
config: {
+// Configure these settings based on the audio you're transcribing
encoding: 'LINEAR16',
sampleRate: 16000
}
@@ -110,43 +128,39 @@ function streamingMicRecognize (filename) {
}
// [END speech_streaming_mic_recognize]

-// The command-line program
-var cli = require('yargs');
-var utils = require('../utils');

-var program = module.exports = {
-syncRecognize: syncRecognize,
-asyncRecognize: asyncRecognize,
-streamingRecognize: streamingRecognize,
-streamingMicRecognize: streamingMicRecognize,
-main: function (args) {
-// Run the command-line program
-cli.help().strict().parse(args).argv;
-}
-};

-cli
+require(`yargs`)
.demand(1)
-.command('sync <filename>', 'Detects speech in an audio file.', {}, function (options) {
-program.syncRecognize(options.filename, utils.makeHandler(false));
-})
-.command('async <filename>', 'Creates a job to detect speech in an audio file, and waits for the job to complete.', {}, function (options) {
-program.asyncRecognize(options.filename, utils.makeHandler(false));
-})
-.command('stream <filename>', 'Detects speech in an audio file by streaming it to the Speech API.', {}, function (options) {
-program.streamingRecognize(options.filename, utils.makeHandler(false));
-})
-.command('listen', 'Detects speech in a microphone input stream.', {}, function () {
-program.streamingMicRecognize();
-})
-.example('node $0 sync ./resources/audio.raw', 'Detects speech in "./resources/audio.raw".')
-.example('node $0 async ./resources/audio.raw', 'Creates a job to detect speech in "./resources/audio.raw", and waits for the job to complete.')
-.example('node $0 stream ./resources/audio.raw', 'Detects speech in "./resources/audio.raw" by streaming it to the Speech API.')
-.example('node $0 listen', 'Detects speech in a microphone input stream.')
+.command(
+`sync <filename>`,
+`Detects speech in an audio file.`,
+{},
+(opts) => syncRecognize(opts.filename)
+)
+.command(
+`async <filename>`,
+`Creates a job to detect speech in an audio file, and waits for the job to complete.`,
+{},
+(opts) => asyncRecognize(opts.filename)
+)
+.command(
+`stream <filename>`,
+`Detects speech in an audio file by streaming it to the Speech API.`,
+{},
+(opts) => streamingRecognize(opts.filename, () => {})
+)
+.command(
+`listen`,
+`Detects speech in a microphone input stream.`,
+{},
+streamingMicRecognize
+)
+.example(`node $0 sync ./resources/audio.raw`)
+.example(`node $0 async ./resources/audio.raw`)
+.example(`node $0 stream ./resources/audio.raw`)
+.example(`node $0 listen`)
.wrap(120)
.recommendCommands()
-.epilogue('For more information, see https://cloud.google.com/speech/docs');

-if (module === require.main) {
-program.main(process.argv.slice(2));
-}
+.epilogue(`For more information, see https://cloud.google.com/speech/docs`)
+.help()
+.strict()
+.argv;
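
Each rewritten command now just invokes its sample function, which logs and resolves with the transcription. Run against the repository's `./resources/audio.raw` fixture, the system tests below expect output along these lines (illustrative invocation, not captured from this commit):

```
$ node recognize.js sync ./resources/audio.raw
Transcription: how old is the Brooklyn Bridge
```
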
34 changes: 17 additions & 17 deletions speech/system-test/quickstart.test.js
@@ -26,33 +26,33 @@ const config = {
};

describe(`speech:quickstart`, () => {
-let speechMock, SpeechMock;

it(`should detect speech`, (done) => {
const expectedFileName = `./resources/audio.raw`;
const expectedText = `how old is the Brooklyn Bridge`;

-speechMock = {
-recognize: (_fileName, _config, _callback) => {
+const speechMock = {
+recognize: (_fileName, _config) => {
assert.equal(_fileName, expectedFileName);
assert.deepEqual(_config, config);
-assert.equal(typeof _callback, `function`);

-speech.recognize(fileName, config, (err, transcription, apiResponse) => {
-_callback(err, transcription, apiResponse);
-assert.ifError(err);
-assert.equal(transcription, expectedText);
-assert.notEqual(apiResponse, undefined);
-assert.equal(console.log.calledOnce, true);
-assert.deepEqual(console.log.firstCall.args, [`Transcription: ${expectedText}`]);
-done();
-});

+return speech.recognize(fileName, config)
+.then((results) => {
+const transcription = results[0];
+assert.equal(transcription, expectedText);

+setTimeout(() => {
+assert.equal(console.log.callCount, 1);
+assert.deepEqual(console.log.getCall(0).args, [`Transcription: ${expectedText}`]);
+done();
+}, 200);

+return results;
+});
}
};
-SpeechMock = sinon.stub().returns(speechMock);

proxyquire(`../quickstart`, {
-'@google-cloud/speech': SpeechMock
+'@google-cloud/speech': sinon.stub().returns(speechMock)
});
});
});
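
The test injects its mock client into the quickstart via proxyquire, so the sample's `Speech()` call returns the mock (which still delegates to a real client for the round trip). A stripped-down sketch of the injection pattern, with a stubbed result standing in for the real call, might look like:

```
// Sketch of the proxyquire + sinon injection pattern used by this test.
// 'fake transcription' is an illustrative value, not the test fixture.
const proxyquire = require('proxyquire');
const sinon = require('sinon');

const speechMock = {
  // Mimics the promise-based recognize(): resolves to [transcription].
  recognize: sinon.stub().returns(Promise.resolve([`fake transcription`]))
};

// Loading the quickstart through proxyquire swaps @google-cloud/speech for a
// constructor stub, so Speech() inside the sample hands back speechMock.
proxyquire(`../quickstart`, {
  '@google-cloud/speech': sinon.stub().returns(speechMock)
});
```
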
6 changes: 3 additions & 3 deletions speech/system-test/recognize.test.js
@@ -25,14 +25,14 @@ const text = `how old is the Brooklyn Bridge`;

describe(`speech:recognize`, () => {
it(`should run sync recognize`, () => {
-assert.equal(run(`${cmd} sync ${filename}`, cwd), `Results: ${text}`);
+assert.equal(run(`${cmd} sync ${filename}`, cwd), `Transcription: ${text}`);
});

it(`should run async recognize`, () => {
-assert.equal(run(`${cmd} async ${filename}`, cwd), `Results: ${text}`);
+assert.equal(run(`${cmd} async ${filename}`, cwd), `Transcription: ${text}`);
});

it(`should run streaming recognize`, () => {
-assert.notEqual(run(`${cmd} stream ${filename}`, cwd).indexOf(text), -1);
+assert.equal(run(`${cmd} stream ${filename}`, cwd).includes(text), true);
});
});
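
These assertions go through a `run(cmd, cwd)` helper defined outside the visible hunk. A minimal stand-in with the shape the assertions rely on (hypothetical, not the repository's actual helper) could be:

```
// Hypothetical stand-in for the test suite's run(cmd, cwd) helper.
const childProcess = require('child_process');

function run (cmd, cwd) {
  // Execute the command in the given directory and return trimmed stdout,
  // which is what the equality assertions above compare against.
  return childProcess.execSync(cmd, { cwd: cwd }).toString().trim();
}
```
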
49 changes: 0 additions & 49 deletions speech/test/quickstart.test.js

This file was deleted.
