Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Merge branch 'FLUID-5675'

* FLUID-5675:
  FLUID-5675: Improved comment
  FLUID-5675: Linting
  FLUID-5675: Moved checkTTSSupport out of test file
  FLUID-5675: Updated comment
  FLUID-5675: Fixing typos
  FLUID-5675: Check if TTS actually available
  FLUID-5675: Refactored tts to be subcomponent
  FLUID-5675: Corrected comment in MockTTS
  FLUID-5675: Added a comment about handleEnd
  FLUID-5675: Added a comment about speechFn
  FLUID-5675: Added MockTTS for integration tests
  FLUID-5675: In progress commit for MockTTS
  • Loading branch information...
commit 70ea372fd461554567ae453c5523a4b30d8cc438 2 parents 4c2ba79 + da2a989
@amb26 amb26 authored
View
36 src/components/textToSpeech/js/TextToSpeech.js
@@ -28,6 +28,40 @@ var fluid_2_0 = fluid_2_0 || {};
return !!(window && window.speechSynthesis);
};
+ /**
+ * Ensures that TTS is supported in the browser, including cases where the
+ * feature is detected, but where the underlying audio engine is missing.
+ * For example in VMs on SauceLabs, the behaviour for browsers which report that the speechSynthesis
+ * API is implemented is for the `onstart` event of an utterance to never fire. If we don't receive this
+ * event within a timeout, this API's behaviour is to return a promise which rejects.
+ *
+ * @param delay {Number} A time in milliseconds to wait for the speechSynthesis to fire its onstart event;
+ * by default it is 1000ms (1s). This is the crux of the test, as it needs time to attempt to run the speechSynthesis.
+ * @return {fluid.promise} A promise which will resolve if the TTS is supported (the onstart event is fired within the delay period)
+ * or be rejected otherwise.
+ */
+ fluid.textToSpeech.checkTTSSupport = function (delay) {
+ var promise = fluid.promise();
+ if (fluid.textToSpeech.isSupported()) {
+ var toSpeak = new SpeechSynthesisUtterance(" "); // short text to attempt to speak
+ toSpeak.volume = 0; // mutes the Speech Synthesizer
+ var timeout = setTimeout(function () {
+ speechSynthesis.cancel();
+ promise.reject();
+ }, delay || 1000);
+ toSpeak.onstart = function () {
+ clearTimeout(timeout);
+ speechSynthesis.cancel();
+ promise.resolve();
+ };
+ speechSynthesis.speak(toSpeak);
+ } else {
+ setTimeout(promise.reject, 0);
+ }
+ return promise;
+ };
+
+
fluid.defaults("fluid.textToSpeech", {
gradeNames: ["fluid.standardRelayComponent", "autoInit"],
events: {
@@ -91,6 +125,8 @@ var fluid_2_0 = fluid_2_0 || {};
funcName: "fluid.textToSpeech.handleStart",
args: ["{that}"]
},
+ // The handleEnd method is assumed to be triggered asynchronously
+ // as it is processed/triggered by the mechanism voicing the utterance.
handleEnd: {
funcName: "fluid.textToSpeech.handleEnd",
args: ["{that}"]
View
34 src/framework/preferences/js/SelfVoicingEnactor.js
@@ -23,21 +23,31 @@ var fluid_2_0 = fluid_2_0 || {};
*******************************************************************************/
fluid.defaults("fluid.prefs.enactor.speak", {
- gradeNames: ["fluid.textToSpeech", "fluid.prefs.enactor", "autoInit"],
+ gradeNames: ["fluid.prefs.enactor", "autoInit"],
preferenceMap: {
"fluid.prefs.speak": {
"model.enabled": "default"
}
},
- invokers: {
- queueSpeech: {
- funcName: "fluid.prefs.enactor.speak.queueSpeech"
+ components: {
+ tts: {
+ type: "fluid.textToSpeech",
+ options: {
+ model: "{speak}.model",
+ invokers: {
+ queueSpeech: {
+ funcName: "fluid.prefs.enactor.speak.queueSpeech",
+ args: ["{that}", "fluid.textToSpeech.queueSpeech", "{arguments}.0", "{arguments}.1", "{arguments}.2"]
+ }
+ }
+ }
}
}
});
-
- fluid.prefs.enactor.speak.queueSpeech = function (that, text, interrupt, options) {
+ // Accepts a speechFn (either a function or function name), which will be used to perform the
+ // underlying queuing of the speech. This allows the SpeechSynthesis to be replaced (e.g. for testing)
+ fluid.prefs.enactor.speak.queueSpeech = function (that, speechFn, text, interrupt, options) {
// force a string value
var str = text.toString();
@@ -46,7 +56,11 @@ var fluid_2_0 = fluid_2_0 || {};
str.replace(/\s{2,}/gi, " ");
if (that.model.enabled && str) {
- fluid.textToSpeech.queueSpeech(that, str, interrupt, options);
+ if (typeof(speechFn) === "string") {
+ fluid.invokeGlobalFunction(speechFn, [that, str, interrupt, options]);
+ } else {
+ speechFn(that, str, interrupt, options);
+ }
}
};
@@ -68,7 +82,7 @@ var fluid_2_0 = fluid_2_0 || {};
handleSelfVoicing: {
funcName: "fluid.prefs.enactor.selfVoicing.handleSelfVoicing",
// Pass in invokers to force them to be resolved
- args: ["{that}.options.strings.welcomeMsg", "{that}.queueSpeech", "{that}.readFromDOM", "{that}.cancel", "{arguments}.0"]
+ args: ["{that}.options.strings.welcomeMsg", "{tts}.queueSpeech", "{that}.readFromDOM", "{tts}.cancel", "{arguments}.0"]
},
readFromDOM: {
funcName: "fluid.prefs.enactor.selfVoicing.readFromDOM",
@@ -102,14 +116,14 @@ var fluid_2_0 = fluid_2_0 || {};
var nodes = elm.contents();
fluid.each(nodes, function (node) {
if (node.nodeType === fluid.prefs.enactor.selfVoicing.nodeType.TEXT_NODE && node.nodeValue) {
- that.queueSpeech(node.nodeValue);
+ that.tts.queueSpeech(node.nodeValue);
}
if (node.nodeType === fluid.prefs.enactor.selfVoicing.nodeType.ELEMENT_NODE && window.getComputedStyle(node).display !== "none") {
if (node.nodeName === "IMG") {
var altText = node.getAttribute("alt");
if (altText) {
- that.queueSpeech(altText);
+ that.tts.queueSpeech(altText);
}
} else {
fluid.prefs.enactor.selfVoicing.readFromDOM(that, node);
View
1  tests/component-tests/textToSpeech/html/TextToSpeech-test.html
@@ -12,6 +12,7 @@
<script type="text/javascript" src="../../../../src/lib/jquery/core/js/jquery.js"></script>
<script type="text/javascript" src="../../../../src/framework/core/js/FluidDocument.js"></script>
<script type="text/javascript" src="../../../../src/framework/core/js/Fluid.js"></script>
+ <script type="text/javascript" src="../../../../src/framework/core/js/FluidPromises.js"></script>
<script type="text/javascript" src="../../../../src/framework/core/js/FluidIoC.js"></script>
<script type="text/javascript" src="../../../../src/framework/core/js/DataBinding.js"></script>
<script type="text/javascript" src="../../../../src/framework/core/js/FluidView.js"></script>
View
114 tests/component-tests/textToSpeech/js/MockTTS.js
@@ -0,0 +1,114 @@
+/*
+Copyright 2015 OCAD University
+
+Licensed under the Educational Community License (ECL), Version 2.0 or the New
+BSD license. You may not use this file except in compliance with one these
+Licenses.
+
+You may obtain a copy of the ECL 2.0 License and BSD License at
+https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
+
+*/
+
+// Declare dependencies
+/* global fluid */
+
+(function () {
+ "use strict";
+
+ // Mocks the fluid.textToSpeech component, removing calls to the
+ // Web Speech API. This will allow for tests to run in browsers
+ // that don't support the Web Speech API.
+ fluid.defaults("fluid.mock.textToSpeech", {
+ gradeNames: ["fluid.textToSpeech", "autoInit"],
+ members: {
+ // An archive of all the calls to queueSpeech.
+ // Will contain an ordered set of objects -- {text: String, options: Object}.
+ speechRecord: [],
+ // An archive of all the events fired
+ // Will contain a key/value pairing where key is the name of the event and the
+ // value is the number of times the event was fired.
+ eventRecord: {}
+ },
+ listeners: {
+ "onStart.recordEvent": {
+ listener: "{that}.recordEvent",
+ args: ["onStart"]
+ },
+ "onStop.recordEvent": {
+ listener: "{that}.recordEvent",
+ args: ["onStop"]
+ },
+ "onSpeechQueued.recordEvent": {
+ listener: "{that}.recordEvent",
+ args: ["onSpeechQueued"]
+ }
+ },
+ invokers: {
+ queueSpeech: {
+ funcName: "fluid.mock.textToSpeech.queueSpeech",
+ args: ["{that}", "{that}.handleStart", "{that}.handleEnd", "{that}.speechRecord", "{arguments}.0", "{arguments}.1", "{arguments}.2"]
+ },
+ cancel: {
+ funcName: "fluid.mock.textToSpeech.cancel",
+ args: ["{that}", "{that}.handleEnd"]
+ },
+ pause: {
+ "this": null,
+ "method": null,
+ listener: "{that}.events.onPause.fire"
+ },
+ resume: {
+ "this": null,
+ "method": null,
+ listener: "{that}.events.onResume.fire"
+ },
+ getVoices: {
+ "this": null,
+ "method": null,
+ listener: "fluid.identity",
+ args: []
+ },
+ recordEvent: {
+ funcName: "fluid.mock.textToSpeech.recordEvent",
+ args: ["{that}.eventRecord", "{arguments}.0"]
+ }
+ }
+ });
+
+ fluid.mock.textToSpeech.queueSpeech = function (that, handleStart, handleEnd, speechRecord, text, interrupt, options) {
+ if (interrupt) {
+ that.cancel();
+ }
+
+ var record = {
+ text: text,
+ interrupt: !!interrupt
+ };
+
+ if (options) {
+ record.options = options;
+ }
+
+ speechRecord.push(record);
+
+ that.queue.push(text);
+ that.events.onSpeechQueued.fire(text);
+
+ // mocking speechSynthesis speak
+ handleStart();
+ // using setTimeout to preserve asynchronous behaviour
+ setTimeout(handleEnd, 0);
+
+ };
+
+ fluid.mock.textToSpeech.cancel = function (that, handleEnd) {
+ that.queue = [];
+ handleEnd();
+ };
+
+ fluid.mock.textToSpeech.recordEvent = function (eventRecord, name) {
+ eventRecord[name] = (eventRecord[name] || 0) + 1;
+ };
+
+})();
View
24 tests/component-tests/textToSpeech/js/TextToSpeechTests.js
@@ -55,6 +55,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
jqUnit.assertFalse("Nothing should be pending", that.model.pending);
jqUnit.assertFalse("Shouldn't be paused", that.model.paused);
jqUnit.assertDeepEq("The queue should be empty", [], that.queue);
+ that.cancel();
jqUnit.start();
},
args: ["{that}"]
@@ -88,16 +89,25 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
},
args: ["{that}"]
},
- "onStop.end": "jqUnit.start"
+ "onStop.end": {
+ listener: function (that) {
+ that.cancel();
+ jqUnit.start();
+ },
+ args: ["{that}"]
+ }
}
});
// only run the tests in browsers that support the Web Speech API for speech synthesis
- if (!fluid.textToSpeech.isSupported()) {
- jqUnit.test("No Tests Run", function () {
- jqUnit.assert("Does not support the SpeechSynthesis");
+
+ fluid.tests.textToSpeech.runNoTTSTests = function () {
+ jqUnit.test("No Tests Run: No TTS Support", function () {
+ jqUnit.assert("Does not support SpeechSynthesis");
});
- } else {
+ };
+
+ fluid.tests.textToSpeech.runTTSTests = function () {
jqUnit.test("Initialization", function () {
var that = fluid.tests.textToSpeech();
@@ -123,6 +133,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
that.queueSpeech("Testing pause and resume events");
});
}
- }
+ };
+ var runTests = fluid.textToSpeech.checkTTSSupport();
+ runTests.then(fluid.tests.textToSpeech.runTTSTests, fluid.tests.textToSpeech.runNoTTSTests);
})();
View
1  tests/framework-tests/preferences/html/SelfVoicingEnactor-test.html
@@ -34,6 +34,7 @@
<script type="text/javascript" src="../../../test-core/utils/js/IoCTestUtils.js"></script>
<!-- These are tests that have been written using this page as data -->
+ <script type="text/javascript" src="../../../component-tests/textToSpeech/js/MockTTS.js"></script>
<script type="text/javascript" src="../js/SelfVoicingEnactorTests.js"></script>
</head>
View
135 tests/framework-tests/preferences/js/SelfVoicingEnactorTests.js
@@ -10,7 +10,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
*/
// Declare dependencies
-/* global fluid, jqUnit, speechSynthesis */
+/* global fluid, jqUnit */
(function ($) {
"use strict";
@@ -23,65 +23,24 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
fluid.defaults("fluid.tests.prefs.enactor.speakEnactor", {
gradeNames: ["fluid.prefs.enactor.speak", "autoInit"],
- members: {
- eventRecord: {},
- speakQueue: []
- },
model: {
- enabled: true,
- utteranceOpts: {
- volume: 0
- }
+ enabled: true
},
- listeners: {
- "onCreate.cleanUp": "fluid.tests.prefs.enactor.speakEnactor.cleanUp",
- "onStart.record": {
- listener: "{that}.record",
- args: ["onStart"]
- },
- "onStop.record": {
- listener: "{that}.record",
- args: ["onStop"]
- },
- "onSpeechQueued.record": {
- listener: "{that}.record",
- args: ["onSpeechQueued"]
- },
- "onSpeechQueued.recordText": "{that}.recordText"
- },
- invokers: {
- record: {
- funcName: "fluid.tests.prefs.enactor.speakEnactor.record",
- args: ["{that}", "{arguments}.0"]
- },
- recordText: {
- funcName: "fluid.tests.prefs.enactor.speakEnactor.recordText",
- args: ["{that}", "{arguments}.0"]
- },
- clearRecords: {
- funcName: "fluid.tests.prefs.enactor.speakEnactor.clearRecords",
- args: ["{that}"]
+ components: {
+ tts: {
+ type: "fluid.mock.textToSpeech",
+ options: {
+ invokers: {
+ queueSpeech: {
+ funcName: "fluid.mock.textToSpeech.queueSpeech",
+ args: ["{that}", "{that}.handleStart", "{that}.handleEnd", "{that}.speechRecord", "{arguments}.0", "{arguments}.1", "{arguments}.2"]
+ }
+ }
+ }
}
}
});
- fluid.tests.prefs.enactor.speakEnactor.cleanUp = function () {
- speechSynthesis.cancel();
- };
-
- fluid.tests.prefs.enactor.speakEnactor.record = function (that, name) {
- that.eventRecord[name] = (that.record[name] || 0) + 1;
- };
-
- fluid.tests.prefs.enactor.speakEnactor.recordText = function (that, text) {
- that.speakQueue.push(text);
- };
-
- fluid.tests.prefs.enactor.speakEnactor.clearRecords = function (that) {
- that.eventRecord = {};
- that.speakQueue = {};
- };
-
fluid.defaults("fluid.tests.speakTests", {
gradeNames: ["fluid.test.testEnvironment", "autoInit"],
components: {
@@ -108,9 +67,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
speaking: false,
pending: false,
paused: false,
- utteranceOpts: {
- volume: 0
- }
+ utteranceOpts: {}
}
},
modules: [{
@@ -119,28 +76,30 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
expect: 3,
name: "Start-Stop flow",
sequence: [{
- func: "{speak}.queueSpeech",
+ func: "{speak}.tts.queueSpeech",
args: ["{that}.options.testOptions.sampleText"]
}, {
listener: "fluid.tests.speakTester.verifyRecords",
args: [
"{speak}",
"{that}.options.testOptions.startStopFireRecord",
- ["{that}.options.testOptions.sampleText"],
+ [{
+ text: "{that}.options.testOptions.sampleText",
+ interrupt: false
+ }],
"{that}.options.testOptions.stoppedModel"
],
spec: {priority: "last"},
- event: "{speak}.events.onStop"
+ event: "{speak}.tts.events.onStop"
}]
}]
}]
});
- fluid.tests.speakTester.verifyRecords = function (that, expectedEvents, expectedText, expectedModel) {
- jqUnit.assertDeepEq("The events should have fired correctly", expectedEvents, that.eventRecord);
- jqUnit.assertDeepEq("The text to be spoken should have been queued correctly", expectedText, that.speakQueue);
+ fluid.tests.speakTester.verifyRecords = function (that, expectedEvents, expectedSpeechRecord, expectedModel) {
+ jqUnit.assertDeepEq("The events should have fired correctly", expectedEvents, that.tts.eventRecord);
+ jqUnit.assertDeepEq("The text to be spoken should have been queued correctly", expectedSpeechRecord, that.tts.speechRecord);
jqUnit.assertDeepEq("The model should be reset correctly", expectedModel, that.model);
- that.clearRecords();
};
/*******************************************************************************
@@ -148,10 +107,29 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
*******************************************************************************/
fluid.defaults("fluid.tests.prefs.enactor.selfVoicingEnactor", {
- gradeNames: ["fluid.prefs.enactor.selfVoicing", "fluid.tests.prefs.enactor.speakEnactor", "autoInit"],
+ gradeNames: ["fluid.prefs.enactor.selfVoicing", "autoInit"],
model: {
enabled: false
},
+ components: {
+ tts: {
+ type: "fluid.mock.textToSpeech",
+ options: {
+ invokers: {
+ // put back the selfVoicingEnactor's own queueSpeech method, but pass in the
+ // mock queueSpeech function as the speechFn
+ queueSpeech: {
+ funcName: "fluid.prefs.enactor.speak.queueSpeech",
+ args: ["{that}", "{that}.mockQueueSpeech", "{arguments}.0", "{arguments}.1", "{arguments}.2"]
+ },
+ mockQueueSpeech: {
+ funcName: "fluid.mock.textToSpeech.queueSpeech",
+ args: ["{arguments}.0", "{that}.handleStart", "{that}.handleEnd", "{that}.speechRecord", "{arguments}.1", "{arguments}.2", "{arguments}.3"]
+ }
+ }
+ }
+ }
+ },
invokers: {
toggle: {
changePath: "enabled",
@@ -177,9 +155,9 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
gradeNames: ["fluid.test.testCaseHolder", "autoInit"],
testOptions: {
expectedText: [
- "{selfVoicing}.options.strings.welcomeMsg",
- "Reading text from DOM",
- "no image"
+ {text: "{selfVoicing}.options.strings.welcomeMsg", interrupt: true},
+ {text: "Reading text from DOM", interrupt: false},
+ {text: "no image", interrupt: false}
]
},
modules: [{
@@ -194,30 +172,21 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt
listener: "fluid.tests.selfVoicingTester.verifySpeakQueue",
args: ["{selfVoicing}", "{that}.options.testOptions.expectedText"],
spec: {priority: "last"},
- event: "{selfVoicing}.events.onStop"
+ event: "{selfVoicing}.tts.events.onStop"
}]
}]
}]
});
fluid.tests.selfVoicingTester.verifySpeakQueue = function (that, expectedText) {
- jqUnit.assertDeepEq("The text to be spoken should have been queued correctly", expectedText, that.speakQueue);
- that.clearRecords();
+ jqUnit.assertDeepEq("The text to be spoken should have been queued correctly", expectedText, that.tts.speechRecord);
};
$(document).ready(function () {
- // only run the tests in browsers that support the Web Speech API for speech synthesis
- if (!fluid.textToSpeech.isSupported()) {
- jqUnit.test("No Tests Run", function () {
- jqUnit.assert("Does not support the SpeechSynthesis Interface");
- });
-
- } else {
- fluid.test.runTests([
- "fluid.tests.speakTests",
- "fluid.tests.selfVoicingTests"
- ]);
- }
+ fluid.test.runTests([
+ "fluid.tests.speakTests",
+ "fluid.tests.selfVoicingTests"
+ ]);
});
})(jQuery);
Please sign in to comment.
Something went wrong with that request. Please try again.