Improve arguments for async_testing.run and command line flags

Now async_testing.run can take any number of arguments, each of which can be
one of:

+ string
+ options object
+ ARGV array

The last argument can still be a callback (see the last example below).

The order matters: later settings override earlier ones. Now you can

run a file:
  async_testing.run('myFile.js');

add defaults:
  async_testing.run({parallel: true}, 'myFile.js');

use command line arguments:
  async_testing.run(process.ARGV);

use command line arguments with defaults:
  async_testing.run({parallel: true}, process.ARGV);

overwrite command line arguments:
  async_testing.run(process.ARGV, {parallel: true});
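
pass a callback as the last argument (a sketch: per lib/running.js below, the
callback receives the number of problems encountered):
  async_testing.run(process.ARGV, function(problems) {
    console.log('finished with ' + problems + ' problem(s)');
  });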

I also worked on improving the built-in option parser a little bit and on
testing it (my tests are not comprehensive yet).  I'm beginning to think that
maintaining a built-in option parser is a pain and that we should bundle an
existing one instead.
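
A rough sketch of driving the new parser directly, modeled on
test/test-parseRunArguments.js from this commit (the require path and the flag
definition here are just for illustration):

  var parse = require('./lib/running').parseRunArguments;
  var fileList = []
    , options = {}
    , flags =
      { 'Behavior':
        [ { longFlag: 'test-name', shortFlag: 't', takesValue: 'name', multiple: true } ]
      };
  // an ARGV array and an options object can be mixed; later arguments win
  parse([process.ARGV, {parallel: 'both'}], fileList, options, flags);
  // non-flag arguments end up in fileList; '--test-name foo' becomes
  // options.testName = ['foo'] (dashed flag names are camelCased)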
commit 0643a38013154691d496777aa3ab79a3fca7e9a1 (1 parent: e4e3779), Benjamin Thomas, committed Oct 29, 2010
Showing with 410 additions and 210 deletions.
  1. +20 −43 lib/console-runner.js
  2. +168 −125 lib/running.js
  3. +3 −9 lib/web-runner.js
  4. +166 −0 test/test-parseRunArguments.js
  5. +53 −33 todo.txt
63 lib/console-runner.js
@@ -10,35 +10,41 @@ var assert = require('assert')
exports.name = 'Console';
exports.runnerFlag =
- { description: 'run the tests and show the output in the terminal'
+ { description: 'use the console runner for running tests from a terminal'
, longFlag: 'console'
+ , key: 'runner'
+ , value: 'Console'
, shortFlag: 'c'
};
exports.optionsFlags =
[ { longFlag: 'log-level'
, shortFlag: 'l'
+ , key: 'verbosity'
, description: '0 => succinct, 1 => default, 2 => full stack traces'
- , varName: 'level'
+ , takesValue: 'level'
}
- , { longFlag: null
- , shortFlag: '0'
+ , { shortFlag: '0'
+ , key: 'verbosity'
+ , value: 0
, description: 'set log level to 0'
}
- , { longFlag: null
- , shortFlag: '1'
+ , { shortFlag: '1'
+ , key: 'verbosity'
+ , value: 1
, description: 'set log level to 1'
}
- , { longFlag: null
- , shortFlag: '2'
+ , { shortFlag: '2'
+ , key: 'verbosity'
+ , value: 2
, description: 'set log level to 2'
}
, { longFlag: 'all'
, shortFlag: 'a'
+ , key: 'printSuccesses'
, description: 'don\'t supress information about passing tests'
}
, { longFlag: 'no-color'
- , shortFlag: 'b'
, description: 'don\'t use colored output'
}
];
@@ -59,43 +65,14 @@ exports.run = function(list, options, callback) {
, bold = function(str){return "\033[1m" + str + "\033[22m"}
;
- // clean up and parse options
- if ('0' in options) {
- options.verbosity = 0;
- delete options['0'];
- }
- if ('1' in options) {
- options.verbosity = 1;
- delete options['1'];
- }
- if ('2' in options) {
- options.verbosity = 2;
- delete options['2'];
- }
- if ('log-level' in options) {
- options.verbosity = options['log-level'][0];
- delete options['log-level'];
- }
- if (typeof options.verbosity == 'undefined') {
+ if (!('verbosity' in options)) {
options.verbosity = 1;
}
- if (typeof options.parallel == 'undefined') {
- options.parallel = false;
- }
- if (options['no-color']) {
- red = green = yellow = function(str) { return str; };
- delete options['no-color'];
- }
- if (options.all) {
- options.printSuccesses = true;
- delete options.all;
- }
- if (typeof options['suites-parallel'] == 'undefined') {
- options['suites-parallel'] = false;
+ // clean up and parse options
+ if (options.noColor) {
+ red = green = yellow = function(str) { return str; };
}
- options.suitesParallel = options['suites-parallel'];
- delete options['suites-parallel'];
var suites
, startTime
@@ -131,7 +108,7 @@ exports.run = function(list, options, callback) {
index++;
var opts =
- { parallel: options.parallel
+ { parallel: options.testsParallel
, testName: options.testName
, onTestDone: function(result) {
testFinished(suite, result);
293 lib/running.js
@@ -3,6 +3,33 @@ var runners = {};
// keeps track of the default
var defRunner;
+var flags = {};
+flags['Behavior'] =
+ [ { longFlag: 'test-name'
+ , shortFlag: 't'
+ , multiple: true
+ , description: 'only run tests with the specified name'
+ , takesValue: 'name'
+ }
+ , { longFlag: 'suite-name'
+ , multiple: true
+ , shortFlag: 's'
+ , description: 'only run suites with the specified name'
+ , takesValue: 'name'
+ }
+ , { longFlag: 'parallel'
+ , shortFlag: 'p'
+ , takesValue: 'what'
+ , options: ['both', 'neither', 'tests', 'suites']
+ , def: 'both'
+ , description: 'what to run in parallel'
+ }
+ , { longFlag: 'help'
+ , shortFlag: 'h'
+ , description: 'output this help message'
+ }
+ ];
+
/* Allow people to add their own runners
*
* A runner should export 4 things:
@@ -20,7 +47,7 @@ exports.registerRunner = function(p, def) {
// TODO check to make sure we have everything we need
- runners[m.name] =
+ var r = runners[m.name] =
{ module: m
, name: m.name
, runnerFlag: m.runnerFlag
@@ -31,6 +58,9 @@ exports.registerRunner = function(p, def) {
if (def) {
defRunner = m.name;
}
+
+ flags['Behavior'].push(r.runnerFlag);
+ flags[m.name + ' Runner'] = r.optionsFlags;
}
// add the built in runners
@@ -49,154 +79,162 @@ exports.registerRunner('./web-runner');
* failed. If you don't need a callback but don't want it to do
* that, then pass something falsey
*/
-exports.run = function(list, args, cb) {
- if (!list) {
- list = [];
- }
- if (list.constructor != Array) {
- // if it isn't an array, a module was passed in directly to be ran
- list = [list];
- }
+exports.run = function() {
+ var args = Array.prototype.slice.call(arguments)
+ , cb
+ ;
- var flags = {};
-
- flags['Behavior'] =
- [ { longFlag: 'test-name'
- , shortFlag: 't'
- , description: 'only run tests with the specified name'
- , varName: 'name'
- }
- , { longFlag: 'suite-name'
- , shortFlag: 's'
- , description: 'only run suites with the specified name'
- , varName: 'name'
- }
- , { longFlag: 'parallel'
- , shortFlag: 'p'
- , description: 'run the tests in parallel'
- }
- , { longFlag: 'suites-parallel'
- , shortFlag: 'P'
- , description: 'run the suites in parallel'
+ if (typeof args[args.length-1] == 'function') {
+ // they supplied their own callback
+ cb = args.pop();
+ }
+ else {
+ // they didn't supply a callback, so assume they don't care when this
+ // ends, so, we create our own callback which exits with the number of
+ // problems when everything is done
+ cb = function (problems) {
+ // we only want to exit after we know everything has been written to
+ // stdout, otherwise sometimes not all the output from tests will have
+ // been printed. Thus we write an empty string to stdout and then make sure
+ // it is 'drained' before exiting
+ var written = process.stdout.write('');
+ if (written) {
+ process.exit(problems);
}
- , { longFlag: 'help'
- , shortFlag: 'h'
- , description: 'output this help message'
+ else {
+ process.stdout.on('drain', function drained() {
+ process.stdout.removeListener('drain', drained);
+ process.exit(problems);
+ });
}
- ]
+ }
+ }
+ var fileList = [];
var options = {}
- var runnerFlags = {};
- for(var name in runners) {
- var r = runners[name];
-
- if (defRunner == name) {
- // this is the default
- options.runner = r;
- }
- else {
- flags['Behavior'].push(r.runnerFlag);
- runnerFlags[r.runnerFlag.longFlag || r.runnerFlag.shortFlag] = name;
- }
+ // fill up options and fileList
+ exports.parseRunArguments(args, fileList, options, flags);
- flags[name + ' Runner'] = r.optionsFlags;
- };
-
- for (var i = 2; i < args.length; i++) {
- var found = false;;
- for (var group in flags) {
- for (var j = 0; j < flags[group].length; j++) {
- var key = null;
- if (flags[group][j].longFlag && args[i] == '--'+flags[group][j].longFlag) {
- key = flags[group][j].longFlag;
- }
- else if (flags[group][j].shortFlag && args[i] == '-'+flags[group][j].shortFlag) {
- key = flags[group][j].longFlag || flags[group][j].shortFlag;
- }
- if (key) {
- if (flags[group][j].varName) {
- var el = args.slice(i+1,i+2)[0];
- if (options[key]) {
- options[key].push(el);
- }
- else {
- options[key] = [el];
- }
- i++;
- }
- else if (key in runnerFlags) {
- options.runner = runners[runnerFlags[key]];
- }
- else {
- options[key] = true;
- }
- break;
- }
- }
-
- if (j != flags[group].length) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- list.push(args[i]);
- }
- }
+ // set individual test and suite parallel options
+ options.testsParallel = options.parallel === true || options.parallel === 'tests' || options.parallel === 'both' ? true : false;
+ options.suitesParallel = options.parallel === true || options.parallel === 'suites' || options.parallel === 'both' ? true : false;
+ delete options.parallel;
if (options.help) {
return generateHelp(flags);
}
- if (list.length === 0) {
- list = ['.'];
+ // if we were given no files to run, run the current directory
+ if (fileList.length === 0) {
+ fileList = ['.'];
}
- // clean up list
- for(var i = 0; i < list.length; i++) {
+
+ // clean up fileList
+ for(var i = 0; i < fileList.length; i++) {
// if it is a filename and the filename starts with the current directory
// then remove that so the results are more succinct
- if (list[i].indexOf(process.cwd()) === 0 && list[i].length > (process.cwd().length+1)) {
- list[i] = list[i].replace(process.cwd()+'/', '');
+ if (fileList[i].indexOf(process.cwd()) === 0 && fileList[i].length > (process.cwd().length+1)) {
+ fileList[i] = fileList[i].replace(process.cwd()+'/', '');
}
}
- var runner = options.runner.module.run;
-
- // clean up universal options
- options.testName = options['test-name'];
- options.suiteName = options['suite-name'];
- delete options['test-name'];
- delete options['suite-name'];
+ var runner = runners[options.runner || defRunner].module.run;
delete options.runner;
// if no callback was supplied they don't care about knowing when things
// finish so assume we can exit with the number of 'problems'
- if (typeof cb == 'undefined') {
- cb = function (problems) {
- // we only want to exit once we know everything has been written to stdout,
- // otherwise sometimes not all the output from tests will have been written.
- // So we write an empty string to stdout and then make sure it is done before
- // exiting
+ if (!cb) {
+ }
- var written = process.stdout.write('');
- if (written) {
- exit();
- }
- else {
- process.stdout.on('drain', function drained() {
- process.stdout.removeListener('drain', drained);
- exit();
- });
- }
+ runner(fileList, options, cb);
+}
- function exit() {
- process.exit(problems);
+exports.parseRunArguments = function(args, fileList, options, flags) {
+ var arg;
+ while(arg = args.shift()) {
+ if (typeof arg == 'string') {
+ fileList.push(arg);
+ }
+ else if(arg.constructor == Array) {
+ var i = arg == process.ARGV ? 1 : 0;
+ for (; i < arg.length; i++) {
+ var found = false;
+ for (var group in flags) {
+ for (var j = 0; j < flags[group].length; j++) {
+ var flag = flags[group][j]
+ , a = arg[i]
+ , key = null
+ , el = null
+ ;
+
+ if (a.indexOf('=') > -1) {
+ a = a.split('=');
+ el = a[1];
+ a = a[0];
+ }
+
+ if ( (flag.longFlag && a == '--'+flag.longFlag)
+ || (flag.shortFlag && a == '-'+flag.shortFlag) ) {
+ key = flag.key || flag.longFlag || flag.shortFlag;
+ }
+
+ if (key) {
+ key = dashedToCamelCase(key);
+
+ if (flag.takesValue) {
+ if (!el) {
+ if (!flag.options || flag.options.indexOf(arg[i+1]) > -1) {
+ el = arg.slice(i+1,i+2)[0];
+ i++;
+ }
+ else {
+ el = flag.def;
+ }
+ }
+
+ if (flag.multiple) {
+ if (options[key]) {
+ options[key].push(el);
+ }
+ else {
+ options[key] = [el];
+ }
+ }
+ else {
+ options[key] = el;
+ }
+ }
+ else {
+ options[key] = 'value' in flag ? flag.value : true;
+ }
+ break;
+ }
+ }
+
+ if (j != flags[group].length) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ fileList.push(arg[i]);
+ }
+ }
+ }
+ else {
+ for (var key in arg) {
+ options[key] = arg[key];
}
}
}
- runner(list, options, cb);
+}
+
+function dashedToCamelCase(key) {
+ var parts = key.split('-');
+ return parts[0] +
+ parts.slice(1).map(function(str) { return str.substr(0,1).toUpperCase() + str.substr(1); }).join('');
}
// creates the help message for running this from the command line
@@ -242,10 +280,15 @@ function generateHelp(flags) {
}
s += '-' + flags[group][i].shortFlag;
}
- if (flags[group][i].varName) {
- s += ' <'+flags[group][i].varName+'>';
+ if (flags[group][i].takesValue) {
+ s += ' <'+flags[group][i].takesValue+'>';
}
- console.log(s + ': ' + flags[group][i].description);
+ console.log(
+ s +
+ ': ' +
+ flags[group][i].description +
+ (flags[group][i].options ? ' ('+flags[group][i].options.join(', ')+')' : '')
+ );
}
console.log('');
}
12 lib/web-runner.js
@@ -12,6 +12,8 @@ exports.name = 'Web';
exports.runnerFlag =
{ description: 'use the web runner for running tests from your browser'
, longFlag: 'web'
+ , key: 'runner'
+ , value: 'Web'
, shortFlag: 'w'
};
@@ -38,19 +40,11 @@ exports.run = function(list, options) {
return;
}
- options.parallel = options.parallel || false;
-
options.port = parseInt(options.port);
if (isNaN(options.port)) {
options.port = 8765;
}
- if (typeof options['suites-parallel'] == 'undefined') {
- options['suites-parallel'] = false;
- }
- options.suitesParallel = options['suites-parallel'];
- delete options['suites-parallel'];
-
var suites
, queue = []
, running = []
@@ -72,7 +66,7 @@ exports.run = function(list, options) {
}
for (var i = 0; i < suites.length; i++) {
- suites[i].parallel = options.parallel;
+ suites[i].parallel = options.testsParallel;
}
var dir = __dirname + '/web-runner/public';
166 test/test-parseRunArguments.js
@@ -0,0 +1,166 @@
+if (module == require.main) {
+ return require('../lib/async_testing').run(process.ARGV);
+}
+
+var parse = require('../lib/running').parseRunArguments;
+
+var flags =
+ { 'group':
+ [ { longFlag: 'first'
+ }
+ , { longFlag: 'flag-with-dashes'
+ }
+ , { longFlag: 'single'
+ , takesValue: 'number'
+ }
+ , { longFlag: 'multiple'
+ , takesValue: 'number'
+ , multiple: true
+ }
+ , { longFlag: 'key'
+ , key: 'keyed'
+ }
+ , { longFlag: 'value'
+ , value: 42
+ }
+ , { longFlag: 'value0'
+ , value: 0
+ }
+ , { longFlag: 'value-key'
+ , key: 'keyedValued'
+ , value: 10
+ }
+ ]
+ };
+
+module.exports =
+ { 'test string': function(test) {
+ var l = [];
+ var o = {};
+
+ parse(['name'], l, o, flags);
+
+ test.deepEqual(['name'], l);
+ test.deepEqual({}, o);
+ test.finish();
+ }
+ , 'test object': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([{first: true}], l, o, flags);
+
+ test.deepEqual([], l);
+ test.deepEqual({first: true}, o);
+ test.finish();
+ }
+ , 'test array': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['name', '--first']], l, o, flags);
+
+ test.deepEqual({first: true}, o);
+ test.finish();
+ }
+ , 'test order1': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([{first: false}, ['--first']], l, o, flags);
+
+ test.deepEqual({first: true}, o);
+ test.finish();
+ }
+ , 'test order2': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--first'], {first: false}], l, o, flags);
+
+ test.deepEqual({first: false}, o);
+ test.finish();
+ }
+ , 'test flag -> key conversion': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--flag-with-dashes']], l, o, flags);
+
+ test.deepEqual({'flagWithDashes': true}, o);
+ test.finish();
+ }
+ , 'test single once': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--single', 'one']], l, o, flags);
+
+ test.deepEqual({'single': 'one'}, o);
+ test.finish();
+ }
+ , 'test single twice': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--single', 'one', '--single', 'two']], l, o, flags);
+
+ test.deepEqual({'single': 'two'}, o);
+ test.finish();
+ }
+ , 'test multiple once': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--multiple', 'one']], l, o, flags);
+
+ test.deepEqual({'multiple': ['one']}, o);
+ test.finish();
+ }
+ , 'test multiple twice': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--multiple', 'one', '--multiple', 'two']], l, o, flags);
+
+ test.deepEqual({'multiple': ['one','two']}, o);
+ test.finish();
+ }
+ , 'test key': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--key']], l, o, flags);
+
+ test.deepEqual({'keyed': true}, o);
+ test.finish();
+ }
+ , 'test value': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--value']], l, o, flags);
+
+ test.deepEqual({'value': 42}, o);
+ test.finish();
+ }
+ , 'test 0 value': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--value0']], l, o, flags);
+
+ test.deepEqual({'value0': 0}, o);
+ test.finish();
+ }
+ , 'test value and key': function(test) {
+ var l = [];
+ var o = {};
+
+ parse([['--value-key']], l, o, flags);
+
+ test.deepEqual({'keyedValued': 10}, o);
+ test.finish();
+ }
+ };
+
86 todo.txt
@@ -1,42 +1,62 @@
-Featueres
----------
-async_testing.run: rethink args to run's callback (make them more useful)
-async_testing.run: make it so run can take any combination of the following...
- -> filename, options object, ARGV array (which can have filenames)
- (in any order) (before it's callback)
- The order that they are specified designated importance, latter are more important
- Example:
- async_testing.run(process.ARGV.slice(1)) // normal case
- async_testing.run({parallel: true}, process.ARGV.slice(1)) // parallel by default
- async_testing.run({parallel: true}, 'somefile.js', process.ARGV.slice(1)) //specific file
- async_testing.run(process.ARGV.slice(1), {parallel: true}) // parallel no matter what
- and so on
+Featueres (not sorted in order of importance
+--------------------------------------------
+async_testing.run:
++ better error handling when parsing arguments, and by better I mean, some
++ rethink args to run's callback (make them more useful)
++ help message summarizing async_testing in generateHelp
+? allow a config file (at say ~/.node-async-testing.json) or something for
+ setting default run options, so say if you want the default to have tests and
+ suites be parallel you can do that.
+? make the default to be to run test and suites in parallel?
+? Add new flag, which says to run everything in parallel, but if a suite fails
+ in some way, don't output it's results, instead re-run the suite serially
++ stop using home grown options parser and add one as a sub module
-Console Runner: readd number of completed tests back to summary in console runner
-Console Runner: are we being too redundant when we have errors?
+Console Runner:
++ readd number of completed tests back to summary in console runner
++ are we being too redundant when we have errors?
-Web Runner: checkbox for web runner to automatically run suites on window or tab focus
-Web Runner: keep track of which suites have been opened and are parallel across refreshes
-Web Runner: checkbox to run suites in parallel or not (right now you have to specify this
+Web Runner:
++ checkbox for web runner to automatically run suites on window or tab focus
++ keep track of which suites have been opened and are parallel across refreshes
+ (in a cookie)
++ checkbox to run suites in parallel or not (right now you have to specify this
via the command line)
-Web Runner: Instead of just show test as blank when a file changes, mention something?
-Web Runner: Show number of failures when the test is closed?
+? Instead of just show test as blank when a file changes, mention something?
+? Show number of failures when the test is closed?
++ only show suites that have tests we are running and only show those tests (in
+ the case of the --test-name flag)
-code coverage
-test.finish can take error? so you could say do:
- fs.readFile(test.finish)
- to make sure that readFile doesn't error
-timeout for suites
-improve stack traces for assertions failures (remove first line, which is just the
- wrapped assertion being called)
+Running tests (async_testing.runSuite, async_testing.runFile):
+? consider combining suiteCompleted, suiteError, suiteExit and suiteLoadError
+ all into one callback, (suiteDone)
++ wrapped assertions should check to see if test has finished first, and if it
+ has fail indicating so
+? test.finish can take error? so you could say do:
+ `fs.readFile(test.finish)`
+ to make sure that readFile doesn't error without having to write your
+ own callback
++ timeout for suites or tests, the easiest and most fool proof way would be to
+ just add this to, runFile and just have it kill the process after a certain
+ amount of time. It could look at the events it is getting from the child and
+ restart the timeout every time a test finishes. If we want to do this in
+ runSuite I don't think there is anything we can do to about something like
+ this happening (since node is single threaded and callbacks won't interrupt
+ running code):
+ `while(true) {}`
++ improve stack traces for assertion failures (remove first line, which is just
+ the wrapped assertion being called)
++ code coverage
Docs
----
-update comments in lib/testing for changes to...
++ get a favicon for webiste, maybe success.png from web-runner?
++ update comments in lib/testing for changes to...
runSuite (tests functions can be arrays)
runFile
-add note to doc for runSuite about it being the only thing running at any given time
-update doc about expandFiles' return value for path, it is a module path
-update doc about errors while running tests
-document onSuiteLoadError
-document run's callback
++ add note to doc for runSuite about how it should be the only thing running at
+ any given time
++ update doc about expandFiles' return value for path, it is a module path
++ update doc about errors while running tests
++ document onSuiteLoadError
++ document run's callback
