Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Merge branch 'master' into Test-Builder1.5

The new t/subtest/wstat.t and t/subtest/bail_out.t tests are failing.
Going to work on that next.

Conflicts:
	.gitignore
	Changes
	MANIFEST
	MANIFEST.SKIP
	Makefile.PL
	lib/Test/Builder.pm
	lib/Test/Builder/Module.pm
	lib/Test/Builder/Tester.pm
	lib/Test/Builder/Tester/Color.pm
	lib/Test/More.pm
	lib/Test/Simple.pm
	t/lib/Test/Builder/NoOutput.pm
	t/subtest/basic.t
  • Loading branch information...
commit f3ec85ada357f4b079b02071c7457c7530363bda 2 parents 7d47f1e + fff6727
@schwern schwern authored
View
23 Changes
@@ -100,6 +100,29 @@ See README and version control log for Test::Builder2 changes.
Very incomplete.
+0.98_01 Tue Nov 8 17:07:58 PST 2011
+ Bug Fixes
+ * BAIL_OUT works inside a subtest. (Larry Leszczynski) [github #138]
+ * subtests now work with threads turned on. [github #145]
+
+ Feature Changes
+ * use_ok() will now apply lexical effects. [rt.cpan.org 67538]
+ (Father Chrysostomos)
+
+ Misc
+ * Test::More, Test::Simple and Test::Builder::Module now require
+ a minimum version of Test::Builder. This avoids Test::More and
+ Test::Builder from getting out of sync. [github #89]
+
+
+0.98 Wed, 23 Feb 2011 14:38:02 +1100
+ Bug Fixes
+ * subtest() should not fail if $? is non-zero. (Aaron Crane)
+
+ Docs
+ * The behavior of is() and undef has been documented. (Pedro Melo)
+
+
0.97_01 Fri Aug 27 22:50:30 PDT 2010
Test Fixes
* Adapted the tests for the new Perl 5.14 regex stringification.
View
4 MANIFEST
@@ -248,6 +248,7 @@ t/skip.t
t/skip_before_plan.t
t/skipall.t
t/subtest/args.t
+t/subtest/bail_out.t
t/subtest/basic.t
t/subtest/default.t
t/subtest/die.t
@@ -259,6 +260,9 @@ t/subtest/line_numbers.t
t/subtest/plan.t
t/subtest/predicate.t
t/subtest/todo.t
+t/subtest/threads.t
+t/subtest/todo.t
+t/subtest/wstat.t
t/TB1vsTB2.t
t/tbm_doesnt_set_exported_to.t
t/test.pl
View
1  MANIFEST.SKIP
@@ -75,3 +75,4 @@
# Don't distribute our distribution tools
^dist/
+
View
15 Makefile.PL
@@ -124,15 +124,20 @@ MAKE
$make =~ s{\s+\z}{\n};
my @perls = qw(
- perl5.8.8
- perl5.8.9
- perl5.10.0
+ perl5.14.1
+ perl5.12.4
+ perl5.12.3
perl5.10.1
- perl5.12.0
- perl5.12.1
+ perl5.10.0
+ perl5.8.9
+ perl5.8.8
);
for my $perl (@perls) {
+ if( !`which $perl` ) {
+ print STDERR "Missing $perl";
+ next;
+ }
$make .= sprintf <<'END', $perl;
cd $(DISTVNAME) && $(MAKE) clean && %s Makefile.PL && PERL_RELEASING=0 $(MAKE) test $(PASTHRU)
END
View
7 lib/Test/Builder.pm
@@ -299,6 +299,7 @@ sub counter {
return $counter;
}
+
=back
=head2 Setting up tests
@@ -448,7 +449,7 @@ Or to plan a variable number of tests:
for my $test (@tests) {
$Test->ok($test);
}
- $Test->done_testing(@tests);
+ $Test->done_testing(scalar @tests);
=cut
@@ -746,6 +747,8 @@ sub _is_dualvar {
Like Test::More's C<is()>. Checks if C<$got eq $expected>. This is the
string version.
+C<undef> only ever matches another C<undef>.
+
=item B<is_num>
$Test->is_num($got, $expected, $name);
@@ -753,6 +756,8 @@ string version.
Like Test::More's C<is()>. Checks if C<$got == $expected>. This is the
numeric version.
+C<undef> only ever matches another C<undef>.
+
=cut
sub is_eq {
View
2  lib/Test/Builder/Module.pm
@@ -2,7 +2,7 @@ package Test::Builder::Module;
use strict;
-use Test::Builder;
+use Test::Builder 0.98;
require Exporter;
our @ISA = qw(Exporter);
View
2  lib/Test/Builder/Tester.pm
@@ -1,7 +1,7 @@
package Test::Builder::Tester;
use strict;
-our $VERSION = "1.21_07";
+our $VERSION = "1.22_07";
use Test::Builder;
use Symbol;
View
2  lib/Test/Builder/Tester/Color.pm
@@ -1,7 +1,7 @@
package Test::Builder::Tester::Color;
use strict;
-our $VERSION = "1.21_07";
+our $VERSION = "1.22_07";
require Test::Builder::Tester;
View
103 lib/Test/FAQ.pod
@@ -10,7 +10,7 @@ issues with Perl.
=head2 Is there any tutorial on testing?
-Test::Tutorial
+L<Test::Tutorial>
=head2 Are there any modules for testing?
@@ -20,23 +20,23 @@ Then go onto L<http://search.cpan.org> and search for "Test".
=head2 Are there any modules for testing web pages/CGI programs?
-Test::WWW::Mechanize, Test::WWW::Selenium
+L<Test::WWW::Mechanize>, L<Test::WWW::Selenium>
=head2 Are there any modules for testing external programs?
-Test::Cmd
+L<Test::Cmd>
=head2 Can you do xUnit/JUnit style testing in Perl?
-Yes, Test::Class allows you to write test methods while continuing to
+Yes, L<Test::Class> allows you to write test methods while continuing to
use all the usual CPAN testing modules. It is the best and most
perlish way to do xUnit style testing.
-Test::Unit is a more direct port of XUnit to Perl, but it does not use
+L<Test::Unit> is a more direct port of XUnit to Perl, but it does not use
the Perl conventions and does not play well with other CPAN testing
modules. As of this writing, it is abandoned. B<Do not use>.
-The Test::Inline (aka Pod::Tests) is worth mentioning as it allows you to
+The L<Test::Inline> (aka L<Pod::Tests>) is worth mentioning as it allows you to
put tests into the POD in the same file as the code.
@@ -109,24 +109,67 @@ all.
=head2 How do I measure the coverage of my test suite?
-Devel::Cover
+L<Devel::Cover>
=head2 How do I get tests to run in a certain order?
Tests run in alphabetical order, so simply name your test files in the order
you want them to run. Numbering your test files works, too.
-To achieve a specific order, try Test::Manifest.
+ t/00_compile.t
+ t/01_config.t
+ t/zz_teardown.t
+
+0 runs first, z runs last.
+
+To achieve a specific order, try L<Test::Manifest>.
+
+Typically you do B<not> want your tests to require being run in a
+certain order, but it can be useful to do a compile check first or to
+run the tests on a very basic module before everything else. This
+gives you early information if a basic module fails which will bring
+everything else down.
+
+Another use is if you have a suite wide setup/teardown, such as
+creating and deleting a large test database, which may be too
+expensive to do for every test.
+
+We recommend B<against> numbering every test file. For most files
+this ordering will be arbitrary and the leading number obscures the
+real name of the file. See L<What should I name my test files?> for
+more information.
+
=head2 What should I name my tests?
+=head2 What should I name my test files?
+
+A test filename serves three purposes:
+
+Most importantly, it serves to identify what is being tested. Each
+test file should test a clear piece of functionality. This could be
+a single class, a single method, even a single bug.
+
+The order in which tests are run is usually dictated by the filename.
+See L<How do I get tests to run in a certain order?> for details.
+
+Finally, the grouping of tests into common bits of functionality can
+be achieved by directory and filenames. For example, all the tests
+for Test::Builder are in the F<t/Builder/> directory.
+
+As an example, F<t/Builder/reset.t> contains the tests for
+C<< Test::Builder->reset >>. F<t/00compile.t> checks that everything
+compiles, and it will run first. F<t/dont_overwrite_die_handler.t>
+checks that we don't overwrite the C<< $SIG{__DIE__} >> handler.
+
+
=head2 How do I deal with tests that sometimes pass and sometimes fail?
=head2 How do I test with a database/network/server that the user may or may not have?
=head2 What's a good way to test lists?
-is_deeply() from Test::More as well as Test::Deep.
+C<is_deeply()> from L<Test::More> as well as L<Test::Deep>.
=head2 Is there such a thing as untestable code?
@@ -149,19 +192,19 @@ Even a random number generator can be tested.
=head2 How do I check the right warnings are issued?
-Test::Warn
+L<Test::Warn>
=head2 How do I test code that prints?
-Test::Output
+L<Test::Output>
=head2 I want to test that my code dies when I do X
-Test::Exception
+L<Test::Exception>
=head2 I want to print out more diagnostic info on failure.
-ok(...) || diag "...";
+C<ok(...) || diag "...";>
=head2 How can I simulate failures to make sure that my code does the Right Thing in the face of them?
@@ -256,9 +299,38 @@ say C<use lib 't/lib'>.
=head2 Why do I need more than ok?
+Since every test can be reduced to checking if a statement is true,
+ok() can test everything. But ok() doesn't tell you why the test
+failed. For that you need to tell the test more... which is why
+you need L<Test::More>.
+
+ ok $pirate->name eq "Roberts", "person's name";
+
+ not ok 1 - person's name
+ # Failed test at pirates.t line 23.
+
+If the above fails, you don't know what C<< $person->name >> returned.
+You have to go in and add a C<diag> call. This is time consuming. If
+it's a heisenbug, it might not fail again! If it's a user reporting a
+test failure, they might not be bothered to hack the tests to give you
+more information.
+
+ is $person->name, "Roberts", "person's name";
+
+ not ok 1 - person's name
+ # Failed test at pirates.t line 23.
+ # got: 'Wesley'
+ # expected: 'Roberts'
+
+Using C<is> from L<Test::More> you now know what value you got and
+what value you expected.
+
+The most useful functions in L<Test::More> are C<is>, C<like> and C<is_deeply>.
+
+
=head2 What's wrong with C<print $test ? "ok" : "not ok">?
-=head2 How do I check for an infinite loop
+=head2 How do I check for an infinite loop?
On Mon, Mar 18, 2002 at 03:57:55AM -0500, Mark-Jason Dominus wrote:
>
@@ -275,7 +347,8 @@ On Mon, Mar 18, 2002 at 03:57:55AM -0500, Mark-Jason Dominus wrote:
=head2 How do I use the comparison functions of a testing module without it being a test?
-Any testing function based on Test::Builder, most are, can be quieted so it does not do any testing. It simply returns true or false. Use the following code...
+Any testing function based on Test::Builder, most are, can be quieted so it does
+not do any testing. It simply returns true or false. Use the following code...
use Test::More; # or any testing module
View
46 lib/Test/More.pm
@@ -20,7 +20,7 @@ sub _carp {
our $VERSION = '2.00_07';
$VERSION = eval $VERSION; ## no critic (BuiltinFunctions::ProhibitStringyEval)
-use Test::Builder::Module;
+use Test::Builder::Module 0.98;
our @ISA = qw(Test::Builder::Module);
our @EXPORT = qw(ok use_ok require_ok
is isnt like unlike is_deeply
@@ -317,6 +317,11 @@ are similar to these:
ok( ultimate_answer() eq 42, "Meaning of Life" );
ok( $foo ne '', "Got some foo" );
+C<undef> will only ever match C<undef>. So you can test a value
+against C<undef> like this:
+
+ is($not_defined, undef, "undefined as expected");
+
(Mnemonic: "This is that." "This isn't that.")
So why use these? They produce better diagnostics on failure. C<ok()>
@@ -392,7 +397,7 @@ So this:
is similar to:
- ok( $got =~ /expected/, 'this is like that');
+ ok( $got =~ m/expected/, 'this is like that');
(Mnemonic "This is like that".)
@@ -824,6 +829,9 @@ import anything, use C<require_ok>.
BEGIN { require_ok "Foo" }
+Lexical effects will occur as usual. For example, this will turn on strictures.
+
+ BEGIN { use_ok "strict"; }
=cut
@@ -834,27 +842,35 @@ sub use_ok ($;@) {
my( $pack, $filename, $line ) = caller;
- my $code;
+ my $f = $filename;
+ $f =~ s/[\n\r]/_/g; # so it doesn't run off the "#line $line $f" line
+
+ my $version;
if( @imports == 1 and $imports[0] =~ /^\d+(?:\.\d+)?$/ ) {
- # probably a version check. Perl needs to see the bare number
- # for it to work with non-Exporter based modules.
- $code = <<USE;
-package $pack;
-use $module $imports[0];
-1;
-USE
+ # probably a version check
+ $version = shift @imports;
}
- else {
- $code = <<USE;
+
+ my $version_check = defined $version ? qq{$module->VERSION($version)} : "";
+ my $code = <<"USE";
package $pack;
-use $module \@{\$args[0]};
+#line $line $f
+require $module; $version_check; $module->import(\@{\$args[0]});
+# Work around [perl #70151]
+\${\$args[1]} = \$^H;
+%{\$args[2]} = %^H;
1;
USE
- }
- my( $eval_result, $eval_error ) = _eval( $code, \@imports );
+ my( $eval_result, $eval_error )
+ = _eval( $code, \@imports, \my($hints, %hints) );
my $ok = $tb->ok( $eval_result, "use $module;" );
+ if( $ok ) {
+ $^H = $hints;
+ %^H = %hints;
+ }
+
unless($ok) {
chomp $eval_error;
$@ =~ s{^BEGIN failed--compilation aborted at .*$}
View
206 lib/Test/Tutorial.pod
@@ -5,8 +5,8 @@ Test::Tutorial - A tutorial about writing really basic tests
=head1 DESCRIPTION
-I<AHHHHHHH!!!! NOT TESTING! Anything but testing!
-Beat me, whip me, send me to Detroit, but don't make
+I<AHHHHHHH!!!! NOT TESTING! Anything but testing!
+Beat me, whip me, send me to Detroit, but don't make
me write tests!>
I<*sob*>
@@ -16,7 +16,7 @@ I<Besides, I don't know how to write the damned things.>
Is this you? Is writing tests right up there with writing
documentation and having your fingernails pulled out? Did you open up
-a test and read
+a test and read
######## We start with some black magic
@@ -36,7 +36,7 @@ Here's the most basic test program.
print 1 + 1 == 2 ? "ok 1\n" : "not ok 1\n";
-since 1 + 1 is 2, it prints:
+Because 1 + 1 is 2, it prints:
1..1
ok 1
@@ -44,11 +44,11 @@ since 1 + 1 is 2, it prints:
What this says is: C<1..1> "I'm going to run one test." [1] C<ok 1>
"The first test passed". And that's about all magic there is to
testing. Your basic unit of testing is the I<ok>. For each thing you
-test, an C<ok> is printed. Simple. B<Test::Harness> interprets your test
+test, an C<ok> is printed. Simple. L<Test::Harness> interprets your test
results to determine if you succeeded or failed (more on that later).
Writing all these print statements rapidly gets tedious. Fortunately,
-there's B<Test::Simple>. It has one function, C<ok()>.
+there's L<Test::Simple>. It has one function, C<ok()>.
#!/usr/bin/perl -w
@@ -56,7 +56,7 @@ there's B<Test::Simple>. It has one function, C<ok()>.
ok( 1 + 1 == 2 );
-and that does the same thing as the code above. C<ok()> is the backbone
+That does the same thing as the previous code. C<ok()> is the backbone
of Perl testing, and we'll be using it instead of roll-your-own from
here on. If C<ok()> gets a true value, the test passes. False, it
fails.
@@ -67,7 +67,7 @@ fails.
ok( 1 + 1 == 2 );
ok( 2 + 2 == 5 );
-from that comes
+From that comes:
1..2
ok 1
@@ -75,28 +75,31 @@ from that comes
# Failed test (test.pl at line 5)
# Looks like you failed 1 tests of 2.
-C<1..2> "I'm going to run two tests." This number is used to ensure
-your test program ran all the way through and didn't die or skip some
-tests. C<ok 1> "The first test passed." C<not ok 2> "The second test
-failed". Test::Simple helpfully prints out some extra commentary about
-your tests.
+C<1..2> "I'm going to run two tests." This number is a I<plan>. It helps to
+ensure your test program ran all the way through and didn't die or skip some
+tests. C<ok 1> "The first test passed." C<not ok 2> "The second test failed".
+Test::Simple helpfully prints out some extra commentary about your tests.
It's not scary. Come, hold my hand. We're going to give an example
of testing a module. For our example, we'll be testing a date
-library, B<Date::ICal>. It's on CPAN, so download a copy and follow
+library, L<Date::ICal>. It's on CPAN, so download a copy and follow
along. [2]
=head2 Where to start?
-This is the hardest part of testing, where do you start? People often
-get overwhelmed at the apparent enormity of the task of testing a
-whole module. Best place to start is at the beginning. Date::ICal is
-an object-oriented module, and that means you start by making an
-object. So we test C<new()>.
+This is the hardest part of testing, where do you start? People often get
+overwhelmed at the apparent enormity of the task of testing a whole module.
+The best place to start is at the beginning. C<Date::ICal> is an
+object-oriented module, and that means you start by making an object. Test
+C<new()>.
#!/usr/bin/perl -w
+ # assume these two lines are in all subsequent examples
+ use strict;
+ use warnings;
+
use Test::Simple tests => 2;
use Date::ICal;
@@ -105,19 +108,19 @@ object. So we test C<new()>.
ok( defined $ical ); # check that we got something
ok( $ical->isa('Date::ICal') ); # and it's the right class
-run that and you should get:
+Run that and you should get:
1..2
ok 1
ok 2
-congratulations, you've written your first useful test.
+Congratulations! You've written your first useful test.
=head2 Names
-That output isn't terribly descriptive, is it? When you have two
-tests you can figure out which one is #2, but what if you have 102?
+That output isn't terribly descriptive, is it? When you have two tests you can
+figure out which one is #2, but what if you have 102 tests?
Each test can be given a little descriptive name as the second
argument to C<ok()>.
@@ -127,7 +130,7 @@ argument to C<ok()>.
ok( defined $ical, 'new() returned something' );
ok( $ical->isa('Date::ICal'), " and it's the right class" );
-So now you'd see...
+Now you'll see:
1..2
ok 1 - new() returned something
@@ -136,8 +139,8 @@ So now you'd see...
=head2 Test the manual
-Simplest way to build up a decent testing suite is to just test what
-the manual says it does. [3] Let's pull something out of the
+The simplest way to build up a decent testing suite is to just test what
+the manual says it does. [3] Let's pull something out of the
L<Date::ICal/SYNOPSIS> and test that all its bits work.
#!/usr/bin/perl -w
@@ -146,20 +149,20 @@ L<Date::ICal/SYNOPSIS> and test that all its bits work.
use Date::ICal;
- $ical = Date::ICal->new( year => 1964, month => 10, day => 16,
- hour => 16, min => 12, sec => 47,
- tz => '0530' );
+ $ical = Date::ICal->new( year => 1964, month => 10, day => 16,
+ hour => 16, min => 12, sec => 47,
+ tz => '0530' );
ok( defined $ical, 'new() returned something' );
ok( $ical->isa('Date::ICal'), " and it's the right class" );
ok( $ical->sec == 47, ' sec()' );
- ok( $ical->min == 12, ' min()' );
+ ok( $ical->min == 12, ' min()' );
ok( $ical->hour == 16, ' hour()' );
ok( $ical->day == 17, ' day()' );
ok( $ical->month == 10, ' month()' );
ok( $ical->year == 1964, ' year()' );
-run that and you get:
+Run that and you get:
1..8
ok 1 - new() returned something
@@ -173,45 +176,42 @@ run that and you get:
ok 8 - year()
# Looks like you failed 1 tests of 8.
-Whoops, a failure! [4] Test::Simple helpfully lets us know on what line
-the failure occurred, but not much else. We were supposed to get 17,
-but we didn't. What did we get?? Dunno. We'll have to re-run the
-test in the debugger or throw in some print statements to find out.
+Whoops, a failure! [4] C<Test::Simple> helpfully lets us know on what line the
+failure occurred, but not much else. We were supposed to get 17, but we
+didn't. What did we get?? Dunno. You could re-run the test in the debugger
+or throw in some print statements to find out.
-Instead, we'll switch from B<Test::Simple> to B<Test::More>. B<Test::More>
-does everything B<Test::Simple> does, and more! In fact, Test::More does
-things I<exactly> the way Test::Simple does. You can literally swap
-Test::Simple out and put Test::More in its place. That's just what
+Instead, switch from L<Test::Simple> to L<Test::More>. C<Test::More>
+does everything C<Test::Simple> does, and more! In fact, C<Test::More> does
+things I<exactly> the way C<Test::Simple> does. You can literally swap
+C<Test::Simple> out and put C<Test::More> in its place. That's just what
we're going to do.
-Test::More does more than Test::Simple. The most important difference
-at this point is it provides more informative ways to say "ok".
-Although you can write almost any test with a generic C<ok()>, it
-can't tell you what went wrong. Instead, we'll use the C<is()>
-function, which lets us declare that something is supposed to be the
-same as something else:
-
- #!/usr/bin/perl -w
+C<Test::More> does more than C<Test::Simple>. The most important difference at
+this point is it provides more informative ways to say "ok". Although you can
+write almost any test with a generic C<ok()>, it can't tell you what went
+wrong. The C<is()> function lets us declare that something is supposed to be
+the same as something else:
use Test::More tests => 8;
use Date::ICal;
- $ical = Date::ICal->new( year => 1964, month => 10, day => 16,
- hour => 16, min => 12, sec => 47,
- tz => '0530' );
+ $ical = Date::ICal->new( year => 1964, month => 10, day => 16,
+ hour => 16, min => 12, sec => 47,
+ tz => '0530' );
ok( defined $ical, 'new() returned something' );
ok( $ical->isa('Date::ICal'), " and it's the right class" );
is( $ical->sec, 47, ' sec()' );
- is( $ical->min, 12, ' min()' );
+ is( $ical->min, 12, ' min()' );
is( $ical->hour, 16, ' hour()' );
is( $ical->day, 17, ' day()' );
is( $ical->month, 10, ' month()' );
is( $ical->year, 1964, ' year()' );
"Is C<$ical-E<gt>sec> 47?" "Is C<$ical-E<gt>min> 12?" With C<is()> in place,
-you get some more information
+you get more information:
1..8
ok 1 - new() returned something
@@ -227,24 +227,24 @@ you get some more information
ok 8 - year()
# Looks like you failed 1 tests of 8.
-letting us know that C<$ical-E<gt>day> returned 16, but we expected 17. A
+Aha. C<$ical-E<gt>day> returned 16, but we expected 17. A
quick check shows that the code is working fine, we made a mistake
-when writing up the tests. Just change it to:
+when writing the tests. Change it to:
is( $ical->day, 16, ' day()' );
-and everything works.
+... and everything works.
-So any time you're doing a "this equals that" sort of test, use C<is()>.
+Any time you're doing a "this equals that" sort of test, use C<is()>.
It even works on arrays. The test is always in scalar context, so you
-can test how many elements are in a list this way. [5]
+can test how many elements are in an array this way. [5]
is( @foo, 5, 'foo has 5 elements' );
=head2 Sometimes the tests are wrong
-Which brings us to a very important lesson. Code has bugs. Tests are
+This brings up a very important lesson. Code has bugs. Tests are
code. Ergo, tests have bugs. A failing test could mean a bug in the
code, but don't discount the possibility that the test is wrong.
@@ -289,16 +289,16 @@ or we could set up a little try/expect loop.
is( $ical->month, $expect->[1], ' month()' );
is( $ical->day, $expect->[2], ' day()' );
is( $ical->hour, $expect->[3], ' hour()' );
- is( $ical->min, $expect->[4], ' min()' );
+ is( $ical->min, $expect->[4], ' min()' );
is( $ical->sec, $expect->[5], ' sec()' );
}
-So now we can test bunches of dates by just adding them to
+Now we can test bunches of dates by just adding them to
C<%ICal_Dates>. Now that it's less work to test with more dates, you'll
be inclined to just throw more in as you think of them.
Only problem is, every time we add to that we have to keep adjusting
the C<use Test::More tests =E<gt> ##> line. That can rapidly get
-annoying. There's two ways to make this work better.
+annoying. There are ways to make this work better.
First, we can calculate the plan dynamically using the C<plan()>
function.
@@ -315,22 +315,28 @@ function.
...and then your tests...
-Or to be even more flexible, we use C<no_plan>. This means we're just
+To be even more flexible, use C<done_testing>. This means we're just
running some tests, don't know how many. [6]
- use Test::More 'no_plan'; # instead of tests => 32
+ use Test::More; # instead of tests => 32
+
+ ... # tests here
+
+ done_testing(); # reached the end safely
-now we can just add tests and not have to do all sorts of math to
-figure out how many we're running.
+If you don't specify a plan, C<Test::More> expects to see C<done_testing()>
+before your program exits. It will warn you if you forget it. You can give
+C<done_testing()> an optional number of tests you expected to run, and if the
+number ran differs, C<Test::More> will give you another kind of warning.
=head2 Informative names
-Take a look at this line here
+Take a look at the line:
ok( defined $ical, "new(ical => '$ical_str')" );
-we've added more detail about what we're testing and the ICal string
+We've added more detail about what we're testing and the ICal string
itself we're trying out to the name. So you get results like:
ok 25 - new(ical => '19971024T120000')
@@ -342,8 +348,8 @@ itself we're trying out to the name. So you get results like:
ok 31 - min()
ok 32 - sec()
-if something in there fails, you'll know which one it was and that
-will make tracking down the problem easier. So try to put a bit of
+If something in there fails, you'll know which one it was and that
+will make tracking down the problem easier. Try to put a bit of
debugging information into the test names.
Describe what the tests test, to make debugging a failed test easier
@@ -377,11 +383,12 @@ F<t/01sanity.t> [7]
is( $t2->epoch, 0, " and back to ICal" );
-The beginning of the epoch is different on most non-Unix operating
-systems [8]. Even though Perl smooths out the differences for the
-most part, certain ports do it differently. MacPerl is one off the
-top of my head. [9] So rather than just putting a comment in the test,
-we can explicitly say it's never going to work and skip the test.
+The beginning of the epoch is different on most non-Unix operating systems [8].
+Even though Perl smooths out the differences for the most part, certain ports
+do it differently. MacPerl is one off the top of my head. [9] Rather than
+putting a comment in the test and hoping someone will read the test while
+debugging the failure, we can explicitly say it's never going to work and skip
+the test.
use Test::More tests => 7;
use Date::ICal;
@@ -391,7 +398,7 @@ we can explicitly say it's never going to work and skip the test.
is( $t1->epoch, 0, "Epoch time of 0" );
SKIP: {
- skip('epoch to ICal not working on MacOS', 6)
+ skip('epoch to ICal not working on Mac OS', 6)
if $^O eq 'MacOS';
is( $t1->ical, '19700101Z', " epoch to ical" );
@@ -407,11 +414,11 @@ we can explicitly say it's never going to work and skip the test.
is( $t2->epoch, 0, " and back to ICal" );
}
-A little bit of magic happens here. When running on anything but
-MacOS, all the tests run normally. But when on MacOS, C<skip()> causes
-the entire contents of the SKIP block to be jumped over. It's never
-run. Instead, it prints special output that tells Test::Harness that
-the tests have been skipped.
+A little bit of magic happens here. When running on anything but MacOS, all
+the tests run normally. But when on MacOS, C<skip()> causes the entire
+contents of the SKIP block to be jumped over. It never runs. Instead,
+C<skip()> prints special output that tells C<Test::Harness> that the tests have
+been skipped.
1..7
ok 1 - Epoch time of 0
@@ -422,7 +429,7 @@ the tests have been skipped.
ok 6 # skip epoch to ICal not working on MacOS
ok 7 # skip epoch to ICal not working on MacOS
-This means your tests won't fail on MacOS. This means less emails
+This means your tests won't fail on MacOS. This means fewer emails
from MacPerl users telling you about failing tests that you know will
never work. You've got to be careful with skip tests. These are for
tests which don't work and I<never will>. It is not for skipping
@@ -439,7 +446,7 @@ The tests are wholly and completely skipped. [10] This will work.
=head2 Todo tests
-Thumbing through the Date::ICal man page, I came across this:
+While thumbing through the C<Date::ICal> man page, I came across this:
ical
@@ -448,8 +455,8 @@ Thumbing through the Date::ICal man page, I came across this:
Retrieves, or sets, the date on the object, using any
valid ICal date/time string.
-"Retrieves or sets". Hmmm, didn't see a test for using C<ical()> to set
-the date in the Date::ICal test suite. So I'll write one.
+"Retrieves or sets". Hmmm. I didn't see a test for using C<ical()> to set
+the date in the Date::ICal test suite. So I wrote one:
use Test::More tests => 1;
use Date::ICal;
@@ -458,7 +465,7 @@ the date in the Date::ICal test suite. So I'll write one.
$ical->ical('20201231Z');
is( $ical->ical, '20201231Z', 'Setting via ical()' );
-run that and I get
+Run that. I saw:
1..1
not ok 1 - Setting via ical()
@@ -467,10 +474,10 @@ run that and I get
# expected: '20201231Z'
# Looks like you failed 1 tests of 1.
-Whoops! Looks like it's unimplemented. Let's assume we don't have
-the time to fix this. [11] Normally, you'd just comment out the test
-and put a note in a todo list somewhere. Instead, we're going to
-explicitly state "this test will fail" by wrapping it in a C<TODO> block.
+Whoops! Looks like it's unimplemented. Assume you don't have the time to fix
+this. [11] Normally, you'd just comment out the test and put a note in a todo
+list somewhere. Instead, explicitly state "this test will fail" by wrapping it
+in a C<TODO> block:
use Test::More tests => 1;
@@ -490,14 +497,14 @@ Now when you run, it's a little different:
# got: '20010822T201551Z'
# expected: '20201231Z'
-Test::More doesn't say "Looks like you failed 1 tests of 1". That '#
-TODO' tells Test::Harness "this is supposed to fail" and it treats a
-failure as a successful test. So you can write tests even before
+C<Test::More> doesn't say "Looks like you failed 1 tests of 1". That '#
+TODO' tells C<Test::Harness> "this is supposed to fail" and it treats a
+failure as a successful test. You can write tests even before
you've fixed the underlying code.
-If a TODO test passes, Test::Harness will report it "UNEXPECTEDLY
-SUCCEEDED". When that happens, you simply remove the TODO block with
-C<local $TODO> and turn it into a real test.
+If a TODO test passes, C<Test::Harness> will report it "UNEXPECTEDLY
+SUCCEEDED". When that happens, remove the TODO block with C<local $TODO> and
+turn it into a real test.
=head2 Testing with taint mode.
@@ -510,15 +517,14 @@ in mind, it's very important to ensure your module works under taint
mode.
It's very simple to have your tests run under taint mode. Just throw
-a C<-T> into the C<#!> line. Test::Harness will read the switches
+a C<-T> into the C<#!> line. C<Test::Harness> will read the switches
in C<#!> and use them to run your tests.
#!/usr/bin/perl -Tw
...test normally here...
-So when you say C<make test> it will be run with taint mode and
-warnings on.
+When you say C<make test> it will run with taint mode on.
=head1 FOOTNOTES
@@ -538,7 +544,7 @@ some bugs, which is good -- we'll uncover them with our tests.
=item 3
You can actually take this one step further and test the manual
-itself. Have a look at B<Test::Inline> (formerly B<Pod::Tests>).
+itself. Have a look at L<Test::Inline> (formerly L<Pod::Tests>).
=item 4
@@ -552,7 +558,7 @@ We'll get to testing the contents of lists later.
But what happens if your test program dies halfway through?! Since we
didn't say how many tests we're going to run, how can we know it
-failed? No problem, Test::More employs some magic to catch that death
+failed? No problem, C<Test::More> employs some magic to catch that death
and turn the test into a failure, even if every test passed up to that
point.
View
11 t/lib/Test/Builder/NoOutput.pm
@@ -3,6 +3,7 @@ package Test::Builder::NoOutput;
use strict;
use warnings;
+use Symbol qw(gensym);
use base qw(Test::Builder);
@@ -59,15 +60,17 @@ sub create {
$self->{_outputs} = \%outputs;
require Test::Builder::Tee;
- tie *OUT, "Test::Builder::Tee", \$outputs{all}, \$outputs{out};
- tie *ERR, "Test::Builder::Tee", \$outputs{all}, \$outputs{err};
+ my($out, $err) = map { gensym() } 1..2;
+ tie *$out, "Test::Builder::Tee", \$outputs{all}, \$outputs{out};
+ tie *$err, "Test::Builder::Tee", \$outputs{all}, \$outputs{err};
- $self->output(*OUT);
- $self->failure_output(*ERR);
+ $self->output($out);
+ $self->failure_output($err);
return $self;
}
+
sub read {
my $self = shift;
my $stream = @_ ? shift : 'all';
View
57 t/subtest/bail_out.t
@@ -0,0 +1,57 @@
+#!/usr/bin/perl -w
+
+BEGIN {
+ if( $ENV{PERL_CORE} ) {
+ chdir 't';
+ @INC = ('../lib', 'lib');
+ }
+ else {
+ unshift @INC, 't/lib';
+ }
+}
+
+my $Exit_Code;
+BEGIN {
+ *CORE::GLOBAL::exit = sub { $Exit_Code = shift; };
+}
+
+use Test::Builder;
+use Test::More;
+
+my $output;
+my $TB = Test::More->builder;
+$TB->output(\$output);
+
+my $Test = Test::Builder->create;
+$Test->level(0);
+
+$Test->plan(tests => 2);
+
+plan tests => 4;
+
+ok 'foo';
+subtest 'bar' => sub {
+ plan tests => 3;
+ ok 'sub_foo';
+ subtest 'sub_bar' => sub {
+ plan tests => 3;
+ ok 'sub_sub_foo';
+ ok 'sub_sub_bar';
+ BAIL_OUT("ROCKS FALL! EVERYONE DIES!");
+ ok 'sub_sub_baz';
+ };
+ ok 'sub_baz';
+};
+
+$Test->is_eq( $output, <<'OUT' );
+1..4
+ok 1
+ 1..3
+ ok 1
+ 1..3
+ ok 1
+ ok 2
+Bail out! ROCKS FALL! EVERYONE DIES!
+OUT
+
+$Test->is_eq( $Exit_Code, 255 );
View
25 t/subtest/threads.t
@@ -0,0 +1,25 @@
+#!/usr/bin/perl -w
+
+use strict;
+use warnings;
+
+use Config;
+BEGIN {
+ unless ( $] >= 5.008001 && $Config{'useithreads'} &&
+ eval { require threads; 'threads'->import; 1; })
+ {
+ print "1..0 # Skip: no working threads\n";
+ exit 0;
+ }
+}
+
+use Test::More;
+
+subtest 'simple test with threads on' => sub {
+ is( 1+1, 2, "simple test" );
+ is( "a", "a", "another simple test" );
+};
+
+pass("Parent retains sharedness");
+
+done_testing(2);
View
24 t/subtest/wstat.t
@@ -0,0 +1,24 @@
+#!/usr/bin/perl -w
+
+# Test that setting $? doesn't affect subtest success
+
+use strict;
+use Test::More;
+
+subtest foo => sub {
+ plan tests => 1;
+ $? = 1;
+ pass('bar');
+};
+
+is $?, 1, "exit code keeps on from a subtest";
+
+subtest foo2 => sub {
+ plan tests => 1;
+ pass('bar2');
+ $? = 1;
+};
+
+is $?, 1, "exit code keeps on from a subtest";
+
+done_testing(4);
View
82 t/use_ok.t
@@ -1,38 +1,34 @@
#!/usr/bin/perl -w
-BEGIN {
- if( $ENV{PERL_CORE} ) {
- chdir 't';
- @INC = qw(../lib ../lib/Test/Simple/t/lib);
- }
- else {
- unshift @INC, 't/lib';
- }
-}
+use strict;
+use warnings;
-use Test::More tests => 15;
+use lib 't/lib';
+use Test::More;
-# Using Symbol because it's core and exports lots of stuff.
-{
+note "Basic use_ok"; {
package Foo::one;
::use_ok("Symbol");
::ok( defined &gensym, 'use_ok() no args exports defaults' );
}
-{
+
+note "With one arg"; {
package Foo::two;
::use_ok("Symbol", qw(qualify));
::ok( !defined &gensym, ' one arg, defaults overridden' );
::ok( defined &qualify, ' right function exported' );
}
-{
+
+note "Multiple args"; {
package Foo::three;
::use_ok("Symbol", qw(gensym ungensym));
::ok( defined &gensym && defined &ungensym, ' multiple args' );
}
-{
+
+note "Defining constants"; {
package Foo::four;
my $warn; local $SIG{__WARN__} = sub { $warn .= shift; };
::use_ok("constant", qw(foo bar));
@@ -40,16 +36,19 @@ use Test::More tests => 15;
::is( $warn, undef, 'no warning');
}
-{
+
+note "use Module VERSION"; {
package Foo::five;
::use_ok("Symbol", 1.02);
}
-{
+
+note "use Module VERSION does not call import"; {
package Foo::six;
::use_ok("NoExporter", 1.02);
}
+
{
package Foo::seven;
local $SIG{__WARN__} = sub {
@@ -59,9 +58,56 @@ use Test::More tests => 15;
::use_ok("Test::More", 0.47);
}
-{
+
+note "Signals are preserved"; {
package Foo::eight;
local $SIG{__DIE__};
::use_ok("SigDie");
::ok(defined $SIG{__DIE__}, ' SIG{__DIE__} preserved');
}
+
+
+note "strict works"; {
+ no strict;
+
+ {
+ BEGIN { use_ok 'strict' }
+ ok !eval { ()=@{"!#%^"}; 1 }, 'use_ok with pragma';
+ }
+
+ ok eval { ()=@{"!#%^"}; 1 }, 'pragmata enabled by use_ok are lexical';
+}
+
+
+note "strict works with a version check"; {
+ no strict;
+
+ BEGIN { use_ok 'strict', 1 }
+ ok !eval { ()=@{"!#%^"}; 1 }, 'use_ok with pragma and version';
+}
+
+
+note "Line numbers preserved"; {
+ my $package = "that_cares_about_line_numbers";
+
+ # Store the output of caller.
+ my @caller;
+ {
+ package that_cares_about_line_numbers;
+
+ sub import {
+ @caller = caller;
+ return;
+ }
+
+ $INC{"$package.pm"} = 1; # fool use into thinking it's already loaded
+ }
+
+ ::use_ok($package);
+ my $line = __LINE__-1;
+ ::is( $caller[0], __PACKAGE__, "caller package preserved" );
+ ::is( $caller[1], __FILE__, " file" );
+ ::is( $caller[2], $line, " line" );
+}
+
+done_testing;
Please sign in to comment.
Something went wrong with that request. Please try again.