diff --git a/02-assertions.ipynb b/02-assertions.ipynb new file mode 100644 index 0000000..9052a37 --- /dev/null +++ b/02-assertions.ipynb @@ -0,0 +1,281 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Testing\n", + "## Assertions" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Assertions are the simplest type of test. They are used as a tool for bounding acceptable behavior during runtime. The `assert` keyword in Python has the following behavior:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# when the argument is true, nothing happens\n", + "assert True == True" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "scrolled": true + }, + "outputs": [], + "source": [ + "# when the argument is false, an assertion error is raised\n", + "assert True == False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "scrolled": true + }, + "outputs": [], + "source": [ + "# an error message can even be added to the assertion\n", + "assert True == False, \"True is not False\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That is, an assertion halts code execution instantly if its argument is false, and does nothing at all if the argument is true. Assertions are therefore a very good tool for guarding a function against foolish (e.g. human) input. The simple mean function below produces a confusing error if the input list is empty.\n", + "\n", + "### Assertions within code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def mean(num_list):\n", + "    return sum(num_list)/len(num_list)\n", + "\n", + "mean([])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we catch the problem earlier with an assertion, the error no longer suggests division by zero; it points at the assertion line, where it is clear that the real issue is an empty list." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def mean(num_list):\n", + "    assert len(num_list) != 0, \"the input is empty\"\n", + "    return sum(num_list)/len(num_list)\n", + "\n", + "mean([])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The advantage of assertions is their ease of use: they are rarely more than one line of code. The disadvantage is that assertions halt execution indiscriminately, and the helpfulness of the resulting error message is usually quite limited." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "mean([\"nonempty\",\"but\",\"not\", \"numbers\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "____\n", + "### Challenge: Insert an Assertion\n", + "In the following code, insert an assertion that checks whether the list is made of numbers."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def mean(num_list):\n", + "    assert len(num_list) != 0, \"the input is empty\"\n", + "    # insert your assertion here\n", + "    return sum(num_list)/len(num_list)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Does it give the right answer?\n", + "mean([1,2,3])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Does this cause it to halt with a type error message?\n", + "mean([\"test\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Does this cause it to halt with an emptiness message?\n", + "mean([])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "----\n", + "### Challenge: Almost Equal\n", + "Assertions are also helpful for catching abnormal behaviors, such as those that arise with floating point arithmetic. Using the assert keyword, how could you test whether some value is almost the same as another value?\n", + "\n", + "- My package, `mynum`, provides the number `a`.\n", + "- Use the `assert` keyword to check whether the number `a` is greater than 2.\n", + "- Use the `assert` keyword to check whether `a` is equal to 2 to within 2 decimal places.\n", + "- Use the `assert` keyword to check that `a` is equal to 2 within an error of 0.003." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from mynum import a\n", + "# greater than 2 assertion here\n", + "# 2 decimal places assertion here\n", + "# 0.003 assertion here" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To help with situations such as those above, there are classes of more helpful assertions that we will use often in later parts of this testing lesson as the building blocks of our tests. The nose testing package contains many of them.\n", + "\n", + "___\n", + "\n", + "### Nose\n", + "\n", + "The nose testing framework provides built-in assertion functions such as `assert_almost_equal`, `assert_true`, `assert_false`, `assert_raises`, `assert_is_instance`, and others.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from nose.tools import assert_almost_equal\n", + "from mynum import a\n", + "assert_almost_equal(a, 2, places=2)\n", + "assert_almost_equal(a, 2, delta=0.003)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These assertions give much more helpful error messages and have much more powerful features than the bare assert keyword. An even more powerful sibling of the assertion is the exception. We’ll learn about exceptions in the next lesson.\n", + "\n", + "## Key Points\n", + "- Assertions are one-line tests embedded in code.\n", + "- The assert keyword is used to set an assertion.\n", + "- Assertions halt execution if the argument is false.\n", + "- Assertions do nothing if the argument is true.\n", + "- The nose.tools package provides more informative assertions.\n", + "- Assertions are the building blocks of tests."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.3.5" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/03-exceptions.ipynb b/03-exceptions.ipynb new file mode 100644 index 0000000..b6a7f6f --- /dev/null +++ b/03-exceptions.ipynb @@ -0,0 +1,235 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Testing\n", + "## Exceptions\n", + "\n", + "### Learning Objectives\n", + "- Understand that exceptions are effectively specialized runtime tests\n", + "- Learn when to use exceptions and what exceptions are available\n", + "\n", + "Exceptions are more sophisticated than assertions. They are the standard error messaging system in most modern programming languages. Fundamentally, when an error is encountered, an informative exception is ‘thrown’ or ‘raised’.\n", + "\n", + "For example, instead of the assertion in the case before, an exception can be used.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def mean(num_list):\n", + "    if len(num_list) == 0 :\n", + "        raise Exception(\"The algebraic mean of an empty list is undefined. \\\n", + "        Please provide a list of numbers\")\n", + "    else :\n", + "        return sum(num_list)/len(num_list)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "Once an exception is raised, it is passed upward in the program scope. An exception can be used to trigger additional error messages or an alternative behavior. Rather than immediately halting code execution, the exception can be ‘caught’ upstream with a try-except block; when wrapped in a try-except block, the exception is intercepted before it reaches global scope and halts execution.\n", + "\n", + "To add information or replace the message before it is passed upstream, the try-except block can be used to catch-and-reraise the exception:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def mean(num_list):\n", + "    try:\n", + "        return sum(num_list)/len(num_list)\n", + "    except ZeroDivisionError as detail :\n", + "        msg = \"The algebraic mean of an empty list is undefined. Please provide a list of numbers.\"\n", + "        raise ZeroDivisionError(str(detail) + \"\\n\" + msg)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "Alternatively, the exception can simply be handled intelligently.
If an alternative behavior is preferred, the exception can be disregarded and a responsive behavior can be implemented like so:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def mean(num_list):\n", + "    try:\n", + "        return sum(num_list)/len(num_list)\n", + "    except ZeroDivisionError :\n", + "        return 0" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "\n", + "If a single function might raise more than one type of exception, each can be caught and handled separately.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "\n", + "def mean(num_list):\n", + "    try:\n", + "        return sum(num_list)/len(num_list)\n", + "    except ZeroDivisionError :\n", + "        return 0\n", + "    except TypeError as detail :\n", + "        msg = \"The algebraic mean of a non-numerical list is undefined. Please provide a list of numbers.\"\n", + "        raise TypeError(str(detail) + \"\\n\" + msg)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Challenge: What Else Can Go Wrong?\n", + "\n", + "1. Think of some other type of exception that could be raised by the try block.\n", + "2. Guard against it by adding an except clause.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def mean(num_list):\n", + "    try:\n", + "        return sum(num_list)/len(num_list)\n", + "    except ZeroDivisionError :\n", + "        return float('inf')\n", + "    except TypeError as detail :\n", + "        msg = \"The algebraic mean of a non-numerical list is undefined. Please provide a list of numbers.\"\n", + "        raise TypeError(str(detail) + \"\\n\" + msg)\n", + "    # your except clause here:\n", + "    " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "### Challenge: Cause all of the errors\n", + "\n", + "- Use the mean function in three different ways, so that you trigger each exceptional case" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# cause the empty list case and check that it returns infinity\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# cause the type error and check that it produces the augmented error message\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# cause the type of exception that you guarded against in the previous challenge\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Exceptions have the advantage of being simple to include and powerfully helpful to the user. However, not all behaviors can or should be checked with runtime exceptions.
Most behaviors should be validated with unit tests.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Key Points\n", + "\n", + "- Exceptions are effectively specialized runtime tests\n", + "- Exceptions can be caught and handled with a try-except block\n", + "- Many built-in Exception types are available" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.3.5" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/10-fixtures.html b/10-fixtures.html new file mode 100644 index 0000000..2c90988 --- /dev/null +++ b/10-fixtures.html @@ -0,0 +1,105 @@ + Software Carpentry: Testing
Testing

Fixtures

Learning Objectives

  • Understand how test fixtures can help write tests.
The above example didn’t require much setup or teardown. Consider, however, the following example that could arise when communicating with third-party programs. You have a function f() which will write a file named yes.txt to disk with the value 42, but only if a file no.txt does not exist. To truly test that the function works, you would want to ensure that neither yes.txt nor no.txt existed before you ran your test. After the test, you want to clean up after yourself before the next test comes along. You could write the test, setup, and teardown functions as follows:
+
import os
+
+from nose.tools import assert_equal, with_setup
+
+from mod import f
+
+def f_setup():
+    # The f_setup() function ensures that neither the yes.txt nor the
+    # no.txt file exists.
+    files = os.listdir('.')
+    if 'no.txt' in files:
+        os.remove('no.txt')
+    if 'yes.txt' in files:
+        os.remove('yes.txt')
+
+def f_teardown():
+    # The f_teardown() function removes the yes.txt file, if it was created.
+    files = os.listdir('.')
+    if 'yes.txt' in files:
+        os.remove('yes.txt')
+
+def test_f():
+    # The first action of test_f() is to make sure the file system is clean.
+    f_setup()
+    exp = 42
+    f()
+    with open('yes.txt', 'r') as fhandle:
+        obs = int(fhandle.read())
+    assert_equal(exp, obs)
+    # The last action of test_f() is to clean up after itself.
+    f_teardown()
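
A side note: the lesson never shows f() itself, and the mod.py added later in this diff does not define it. Below is a minimal sketch consistent with the behavior described above; this exact implementation is an assumption, not part of the lesson.

+import os
+
+def f():
+    # Write yes.txt containing the value 42, but only if no.txt does not exist.
+    if not os.path.exists('no.txt'):
+        with open('yes.txt', 'w') as fhandle:
+            fhandle.write('42')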
+
The above implementation of setup and teardown is usually fine. However, it does not guarantee that the f_setup() and the f_teardown() functions will be called. This is because an unexpected error anywhere in the body of f() or test_f() will cause the test to abort before the teardown function is reached.
+
These setup and teardown behaviors are needed when test fixtures must be created. A fixture is any environmental state or object that is required for the test to run successfully.
+
As above, a function that is executed before the test to prepare the fixture is called a setup function. One that is executed to mop up side effects after a test is run is called a teardown function. Nose has a decorator that you can use to automatically run the setup and teardown of fixtures regardless of whether the test succeeds, fails, or errors.
+
To make sure that both of these functions will be executed, you must use nose’s with_setup() decorator. This decorator may be applied to any test and takes a setup and a teardown function as possible arguments. We can rewrite test_f() to be wrapped by with_setup().
+
@with_setup(setup=f_setup, teardown=f_teardown)
+def test_f():
+    exp = 42
+    f()
+    with open('yes.txt', 'r') as fhandle:
+        obs = int(fhandle.read())
+    assert_equal(exp, obs)
+
Note that if you have functions in your test module that are simply named setup() and teardown(), each of these is called automatically: setup() when the entire test module is loaded, and teardown() when it is finished.
+
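
To illustrate that module-level convention, here is a minimal sketch of a test module; the contents are illustrative assumptions, not part of the lesson.

+def setup():
+    # nose calls this once, before any test in this module runs
+    print('setting up the test module')
+
+def teardown():
+    # nose calls this once, after every test in this module has finished
+    print('tearing down the test module')
+
+def test_truth():
+    assert True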
+
Key Points

  • It may be necessary to set up “fixtures” composing the test environment.
+
+
+
+
+ +
diff --git a/10-fixtures.md b/10-fixtures.md new file mode 100644 index 0000000..d425755 --- /dev/null +++ b/10-fixtures.md @@ -0,0 +1,92 @@ +--- +layout: page +title: Testing +subtitle: Fixtures +minutes: 10 +--- +> ## Learning Objectives {.objectives} +> +> - Understand how test fixtures can help write tests. + +The above example didn't require much setup or teardown. Consider, however, the +following example that could arise when communicating with third-party programs. +You have a function `f()` which will write a file named `yes.txt` to disk with +the value 42, but only if a file `no.txt` does not exist. To truly test that the +function works, you would want to ensure that neither `yes.txt` nor `no.txt` +existed before you ran your test. After the test, you want to clean up after +yourself before the next test comes along. You could write the test, setup, +and teardown functions as follows: + +~~~ {.python} +import os + +from nose.tools import assert_equal, with_setup + +from mod import f + +def f_setup(): +    # The f_setup() function ensures that neither the yes.txt nor the +    # no.txt file exists. +    files = os.listdir('.') +    if 'no.txt' in files: +        os.remove('no.txt') +    if 'yes.txt' in files: +        os.remove('yes.txt') + +def f_teardown(): +    # The f_teardown() function removes the yes.txt file, if it was created. +    files = os.listdir('.') +    if 'yes.txt' in files: +        os.remove('yes.txt') + +def test_f(): +    # The first action of test_f() is to make sure the file system is clean. +    f_setup() +    exp = 42 +    f() +    with open('yes.txt', 'r') as fhandle: +        obs = int(fhandle.read()) +    assert_equal(exp, obs) +    # The last action of test_f() is to clean up after itself. +    f_teardown() +~~~ + +The above implementation of setup and teardown is usually fine. However, it does +not guarantee that the `f_setup()` and the `f_teardown()` functions will be +called. This is because an unexpected error anywhere in the body of `f()` or +`test_f()` will cause the test to abort before the teardown function is reached. + +These setup and teardown behaviors are needed when _test fixtures_ must be +created. A fixture is any environmental state or object that is required for the test to run successfully. + +As above, a function that is executed before the test to prepare the fixture +is called a _setup_ function. One that is executed to mop up side effects +after a test is run is called a _teardown_ function. Nose has a decorator that +you can use to automatically run the setup and teardown of fixtures regardless +of whether the test succeeds, fails, or errors. + +To make sure that both of these functions will be executed, you must use nose's +`with_setup()` decorator. This decorator may be applied to any test +and takes a setup and a teardown function as possible arguments. We can rewrite +`test_f()` to be wrapped by `with_setup()`. + +~~~ {.python} +@with_setup(setup=f_setup, teardown=f_teardown) +def test_f(): +    exp = 42 +    f() +    with open('yes.txt', 'r') as fhandle: +        obs = int(fhandle.read()) +    assert_equal(exp, obs) +~~~ + +Note that if you have functions in your test module that are simply named +`setup()` and `teardown()`, each of these is called automatically: `setup()` +when the entire test module is loaded, and `teardown()` when it is finished. + +> ## Key Points {.keypoints} +> +> - It may be necessary to set up "fixtures" composing the test environment.
diff --git a/mean.py b/mean.py new file mode 100644 index 0000000..abd11cc --- /dev/null +++ b/mean.py @@ -0,0 +1,11 @@ +def mean(num_list): +    try: +        return sum(num_list)/len(num_list) +    except ZeroDivisionError as detail : +        msg = "The algebraic mean of an empty list is undefined." +        msg += " Please provide a list of numbers." +        raise ZeroDivisionError(str(detail) + "\n" + msg) +    except TypeError as detail : +        msg = "The algebraic mean of a non-numerical list is undefined." +        msg += " Please provide a list of numbers." +        raise TypeError(str(detail) + "\n" + msg) diff --git a/mod.py b/mod.py new file mode 100644 index 0000000..3e785d9 --- /dev/null +++ b/mod.py @@ -0,0 +1,24 @@ +import numpy as np + +def sinc2d(x, y): +    if x == 0.0 and y == 0.0: +        return 1.0 +    elif x == 0.0: +        return np.sin(y) / y +    elif y == 0.0: +        return np.sin(x) / x +    else: +        return (np.sin(x) / x) * (np.sin(y) / y) + +def a(x): +    return x + 1 + +def b(x): +    return 2 * x + +def c(x): +    return b(a(x)) + +def std(vals): +    # surely this is cheating... +    return 1.0 diff --git a/test_mean.py b/test_mean.py new file mode 100644 index 0000000..371ac5d --- /dev/null +++ b/test_mean.py @@ -0,0 +1,35 @@ +from mean import * +import unittest + +def test_ints(): +    num_list = [1, 2, 3, 4, 5] +    obs = mean(num_list) +    exp = 3 +    assert obs == exp + +def test_zero(): +    num_list = [0, 2, 4, 6] +    obs = mean(num_list) +    exp = 3 +    assert obs == exp + +def test_double(): +    # This one will fail in Python 2 +    num_list = [1, 2, 3, 4] +    obs = mean(num_list) +    exp = 2.5 +    assert obs == exp + +def test_long(): +    big = 100000000 +    obs = mean(range(1, big)) +    exp = big/2.0 +    assert obs == exp + +def test_complex(): +    # given that complex numbers are an unordered field, +    # the arithmetic mean of complex numbers is meaningless +    num_list = [2 + 3j, 3 + 4j, -32 - 2j] +    obs = mean(num_list) +    exp = NotImplemented +    assert obs == exp
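
A closing observation: test_mean.py exercises only the success paths of mean(). Since mean.py re-raises ZeroDivisionError and TypeError with augmented messages, those error paths can be tested with nose's assert_raises. The following is a minimal sketch; the test names are illustrative, not part of the diff.

~~~ {.python}
from nose.tools import assert_raises

from mean import mean

def test_mean_of_empty_list_raises():
    # mean() re-raises ZeroDivisionError for an empty list
    assert_raises(ZeroDivisionError, mean, [])

def test_mean_of_non_numerical_list_raises():
    # mean() re-raises TypeError for a list of non-numbers
    assert_raises(TypeError, mean, ['not', 'numbers'])
~~~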