use 'na' instead of 'skip' for not applicable in this_tests
stas00 committed Mar 3, 2019
1 parent 8332a84 commit c33edcd
Showing 16 changed files with 43 additions and 42 deletions.
7 changes: 4 additions & 3 deletions docs/dev/test.md
@@ -467,7 +467,7 @@ def test_this_tests():
    this_tests('fastai.gen_doc.doctest.this_tests')

    # special case for cases where a test doesn't test fastai API
-    this_tests('skip')
+    this_tests('na')

    # not a real function
    func = 'foo bar'
@@ -507,11 +507,12 @@ def test_get_preds():
You can make the call `this_tests` anywhere in the test, so if the object becomes available at line 10 of the test, add `this_tests` after it.
-And there is a special case for situations where a test doesn't test fastai API or it's a non-callable attribute, e.g. `learn.data`:
+And there is a special case for situations where a test doesn't test fastai API or it's a non-callable attribute, e.g. `learn.loss_func`, in which case use `na` (not applicable):
```
def test_non_fastai_func():
-    this_tests('skip')
+    this_tests('na')
```
But we still want the call to be there, since we run a check to make sure we don't miss out on any tests, hence each test needs to have this call.
The test registry is located at `fastai/test_api_db.json` and it gets auto-generated when `pytest` gets a `--testapireg` flag, which is currently done when `make test-full` is run.
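To illustrate the placement rule described earlier in this excerpt, here is a minimal sketch (the test body is hypothetical and not part of this commit; `fake_learner` is assumed to be a helper like the one used in tests/test_metrics.py below):

```
from fastai.gen_doc.doctest import this_tests

def test_fit_registers_late():
    learn = fake_learner(3,2)   # the object under test only exists from this line on
    this_tests(learn.fit)       # so register the tested API here, not at the top of the test
    learn.fit(1)
```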
2 changes: 1 addition & 1 deletion fastai/gen_doc/doctest.py
@@ -32,7 +32,7 @@ def this_tests(*funcs):
    parent_func_lineno, _ = get_parent_func(lineno, get_lines(file_name))
    entry = {'file': relative_test_path(file_name), 'test': test_name, 'line': parent_func_lineno}
    for func in funcs:
-        if func == 'skip':
+        if func == 'na':
            # special case when we can't find a function to declare, e.g.
            # when attributes are tested
            continue
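As a self-contained illustration of the guard above, here is a rough sketch of the registration flow (the registry shape is inferred from the `entry` dict in this hunk; `TEST_API_DB` and `register_this_tests` are illustrative names, not fastai's actual internals):

```
TEST_API_DB = {}   # stand-in for the JSON registry in fastai/test_api_db.json

def register_this_tests(test_file, test_name, lineno, *funcs):
    entry = {'file': test_file, 'test': test_name, 'line': lineno}
    for func in funcs:
        if func == 'na':
            continue   # 'na': the call still satisfies the coverage check, but registers nothing
        TEST_API_DB.setdefault(func, []).append(entry)

register_this_tests('tests/test_utils_fastai.py', 'test_has_version', 4, 'na')
assert TEST_API_DB == {}   # an 'na' marker leaves the registry untouched
```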
2 changes: 1 addition & 1 deletion tests/test_basic_data.py
@@ -75,7 +75,7 @@ def test_DataBunch_show_batch(capsys):
## data.export()

def test_DeviceDataLoader_getitem():
-    this_tests('skip')
+    this_tests('na')
    class DictDataset(Dataset):
        def __getitem__(self, idx):
            return {"a":np.ones((3,)),"b":np.zeros((2,))}
4 changes: 2 additions & 2 deletions tests/test_batchnom_issue_minimal.py
@@ -14,10 +14,10 @@ def _run_batch_size_test(bs):
# This test will fail as the last batch will have a size of 1
@pytest.mark.skip
def test_batch_size_4():
-    this_tests('skip')
+    this_tests('na')
    _run_batch_size_test(4)

# This test succeeds
def test_batch_size_3():
-    this_tests('skip')
+    this_tests('na')
    _run_batch_size_test(3)
8 changes: 4 additions & 4 deletions tests/test_data_block.py
@@ -5,7 +5,7 @@
def chk(a,b): assert np.array_equal(a,b)

def test_category():
-    this_tests('skip')
+    this_tests('na')
    c1 = [1,3,2,3,1]
    c2 = list('cabbc')
    df = pd.DataFrame(dict(c1=c1,c2=c2))
@@ -47,7 +47,7 @@ def test_category():
    chk([o.data for o in y], exp)

def test_multi_category():
-    this_tests('skip')
+    this_tests('na')
    c1 = [1,3,2,3,1]
    c2 = ['c a', 'a b', 'b c', '', 'a']
    c2_exp = ['c;a', 'a;b', 'b;c', '', 'a']
@@ -121,14 +121,14 @@ def test_split_subsets():
    ItemList(range(10)).split_subsets(train_size=0.5, valid_size=0.0).label_const(0)

def test_regression():
-    this_tests('skip')
+    this_tests('na')
    df = pd.DataFrame({'x':range(100), 'y':np.random.rand(100)})
    data = ItemList.from_df(df, path='.', cols=0).random_split_by_pct().label_from_df(cols=1).databunch()
    assert data.c==1
    assert isinstance(data.valid_ds, LabelList)

def test_wrong_order():
-    this_tests('skip')
+    this_tests('na')
    path = untar_data(URLs.MNIST_TINY)
    with pytest.raises(Exception):
        src = ImageList.from_folder(path).label_from_folder().split_by_folder()
2 changes: 1 addition & 1 deletion tests/test_gen_doc_nbtest.py
@@ -92,7 +92,7 @@ def test_this_tests():
    this_tests('fastai.gen_doc.doctest.this_tests')

    # special case for situations where a test doesn't test fastai API or non-callable attribute
-    this_tests('skip')
+    this_tests('na')

    # not a real function
    func = 'foo bar'
2 changes: 1 addition & 1 deletion tests/test_metrics.py
@@ -132,7 +132,7 @@ def on_epoch_end(self, **kwargs):
        self.metric = torch.tensor(dummy_base_val**self.epoch)

def test_custom_metric_class():
-    this_tests('skip')
+    this_tests('na')
    learn = fake_learner(3,2)
    learn.metrics.append(DummyMetric())
    with CaptureStdout() as cs: learn.fit_one_cycle(2)
8 changes: 4 additions & 4 deletions tests/test_tabular_train.py
@@ -28,7 +28,7 @@ def test_accuracy(learn):
    assert learn.validate()[1] > 0.7

def test_same_categories(learn):
-    this_tests('skip')
+    this_tests('na')
    x_train,y_train = learn.data.train_ds[0]
    x_valid,y_valid = learn.data.valid_ds[0]
    x_test,y_test = learn.data.test_ds[0]
@@ -39,7 +39,7 @@ def test_same_categories(learn):
assert np.all(x_train.classes[key] == x_test.classes[key])

def test_same_fill_nan(learn):
-    this_tests('skip')
+    this_tests('na')
    df = pd.read_csv(path/'adult.csv')
    nan_idx = np.where(df['education-num'].isnull())
    val = None
@@ -56,7 +56,7 @@ def test_normalize(learn):
    df = pd.read_csv(path/'adult.csv')
    train_df = df.iloc[0:800].append(df.iloc[1000:])
    c = 'age'
-    this_tests('skip')
+    this_tests('na')
    mean, std = train_df[c].mean(), train_df[c].std()
    for i in np.random.randint(0,799, (20,)):
        x,y = learn.data.train_ds[i]
@@ -69,7 +69,7 @@ def test_empty_cont():
assert np.abs(x.conts[0] - (df.loc[i, c] - mean) / (1e-7 + std)) < 1e-6

def test_empty_cont():
-    this_tests('skip')
+    this_tests('na')
    df = pd.read_csv(path/'adult.csv')
    procs = [FillMissing, Categorify, Normalize]
    dep_var = 'salary'
4 changes: 2 additions & 2 deletions tests/test_text_data.py
@@ -121,7 +121,7 @@ def test_backwards_cls_databunch():
assert any([orig in as_text for orig in orig_texts]) # batch samples contain BOS and optionally PAD tokens

def df_test_collate(data):
-    this_tests('skip')
+    this_tests('na')
    x,y = next(iter(data.train_dl))
    assert x.size(0) == 8
    assert x[0,-1] == 1
@@ -172,7 +172,7 @@ def test_from_ids_works_for_variable_length_sentences():
valid_ids=ids, valid_lbls=lbl, classes={0:0}, bs=8)

def test_regression():
-    this_tests('skip')
+    this_tests('na')
    path = untar_data(URLs.IMDB_SAMPLE)
    df = text_df([0., 1.])
    data = (TextList.from_df(df, path, cols='text')
4 changes: 2 additions & 2 deletions tests/test_text_languagemodelpreloader.py
@@ -52,7 +52,7 @@ def verify_datadirection( bs,seq_len,sentence_len, iterations,minTokens, backwar
assert np.all(batches[ix-1,-1]+diff == batches[ix,0]), f"last token i row-1 {batches[ix-1,-1]}+{diff} must be equal to first element in row:{batches[ix,0]}"

def test_forward_minibatch():
-    this_tests('skip')
+    this_tests('na')
    bs = 4
    seq_len = 3
    sentence_len = 20*seq_len
@@ -61,7 +61,7 @@ def test_forward_minibatch():
    verify_datadirection( bs, seq_len, sentence_len, iterations, minTokens, backwards=False, nbTests=1000)

def test_backwards_minibatch():
-    this_tests('skip')
+    this_tests('na')
    bs = 4
    seq_len = 3
    sentence_len = 20*seq_len
6 changes: 3 additions & 3 deletions tests/test_text_train.py
@@ -39,7 +39,7 @@ def learn():
def n_params(learn): return sum([len(pg['params']) for pg in learn.opt.opt.param_groups])

def test_opt_params(learn):
-    this_tests('skip')
+    this_tests('na')
    learn.freeze()
    assert n_params(learn) == 2
    learn.unfreeze()
@@ -81,7 +81,7 @@ def test_qrnn_works_if_split_fn_provided():
    assert learn.validate()[1] > 0.3

def test_vocabs(learn):
-    this_tests('skip')
+    this_tests('na')
    for ds in [learn.data.valid_ds, learn.data.test_ds]:
        assert len(learn.data.train_ds.vocab.itos) == len(ds.vocab.itos)
        assert np.all(learn.data.train_ds.vocab.itos == ds.vocab.itos)
@@ -125,7 +125,7 @@ def clean_destroy_block():

@pytest.mark.skip(reason="fix me")
def test_mem_leak():
-    this_tests('skip')
+    this_tests('na')
    gc.collect()
    garbage_before = len(gc.garbage) # should be 0 already, or something leaked earlier
    assert garbage_before == 0
2 changes: 1 addition & 1 deletion tests/test_torch_core.py
@@ -107,7 +107,7 @@ def test_calc_loss():
    assert isclose(loss.sum(),6.23,abs_tol=1e-2), "final loss without reduction does not seem to be correct"

def test_tensor_array_monkey_patch():
-    this_tests('skip')
+    this_tests('na')
    t = torch.ones(a)
    t = np.array(t)
    assert np.all(t == t), "Tensors did not properly convert to numpy arrays"
2 changes: 1 addition & 1 deletion tests/test_utils_fastai.py
@@ -2,5 +2,5 @@
from fastai.gen_doc.doctest import this_tests

def test_has_version():
-    this_tests('skip')
+    this_tests('na')
    assert fastai.__version__
22 changes: 11 additions & 11 deletions tests/test_utils_links.py
@@ -13,52 +13,52 @@ def build_nb_cells(mod_names):

@pytest.mark.skip(reason="need to update")
def test_torchvision():
-    this_tests('skip')
+    this_tests('na')
    docstr = 'Note that `tvm` is the namespace we use for `torchvision.models`.'
    expected = 'Note that [`tvm`](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models) is the namespace we use for `torchvision.models`.'
    assert_link(docstr, expected, msg='Should match imported aliases')

def test_fastai_prefix():
-    this_tests('skip')
+    this_tests('na')
    docstr = "functions for your application (`fastai.vision`)"
    expected = "functions for your application ([`fastai.vision`](/vision.html#vision))"
    assert_link(docstr, expected, msg='Should match keywords prefixed with fastai. See `index.ipynb`')

def test_link_typedef():
-    this_tests('skip')
+    this_tests('na')
    docstr = "- `LayerFunc` = `Callable`\[`nn.Module`],`None`]"
    expected = "- `LayerFunc` = `Callable`\[[`nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)],`None`]"
    assert_link(docstr, expected, modules=[torch], msg='Type definitions to torch formatted incorrectly. See fastai_typing.ipynb')

def test_link_typedef_double_bt():
-    this_tests('skip')
+    this_tests('na')
    docstr = "- `ParamList` = `Collection`\[`nn`.`Parameter`]"
    expected = "- `ParamList` = `Collection`\[[`nn`](https://pytorch.org/docs/stable/nn.html#torch-nn).`Parameter`]"
    assert_link(docstr, expected)

def test_link_inner_class_functions():
-    this_tests('skip')
+    this_tests('na')
    docstr = "To train your model in mixed precision you just have to call `Learner.to_fp16`, which converts the model and modifies the existing `Learner` to add `MixedPrecision`."
    expected = "To train your model in mixed precision you just have to call [`Learner.to_fp16`](/train.html#to_fp16), which converts the model and modifies the existing [`Learner`](/basic_train.html#Learner) to add [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision)."
    imports = 'from fastai.callbacks.fp16 import *'
    assert_link(docstr, expected, nb_cells=[gen_notebooks.get_code_cell(imports)])

def test_class_anchor():
-    this_tests('skip')
+    this_tests('na')
    docstr = "`DataBunch.create`, `DeviceDataLoader.proc_batch`"
    expected = "[`DataBunch.create`](/basic_data.html#DataBunch.create), [`DeviceDataLoader.proc_batch`](/basic_data.html#DeviceDataLoader.proc_batch)"
    imports = 'from fastai.basic_train import *'
    assert_link(docstr, expected, nb_cells=[gen_notebooks.get_code_cell(imports)])

def test_link_class_methods():
-    this_tests('skip')
+    this_tests('na')
    docstr = "`ImageDataBunch.from_csv`"
    expected = "[`ImageDataBunch.from_csv`](/vision.data.html#ImageDataBunch.from_csv)"
    imports = 'from fastai.vision.data import *'
    assert_link(docstr, expected, nb_cells=[gen_notebooks.get_code_cell(imports)])

def test_respects_import_order():
-    this_tests('skip')
+    this_tests('na')
    docstr = "`learner`"
    expected = "[`learner`](/vision.learner.html#vision.learner)"
    assert_link(docstr, expected, build_nb_cells(['fastai.text', 'fastai.vision']))
@@ -67,7 +67,7 @@ def test_respects_import_order():
    assert_link(docstr, expected_text, build_nb_cells(['fastai.vision', 'fastai.text']))

def test_nb_module_name_has_highest_priority():
-    this_tests('skip')
+    this_tests('na')
    # get_imported_modules.nb_module_name should have highest priority. This is the associated notebook module.
    # Ex: vision.transforms.ipynb is associated with fastai.vision.transforms
    docstr = "`transform`"
@@ -81,7 +81,7 @@

@pytest.mark.skip(reason="need to update")
def test_application_links_top_level_modules():
-    this_tests('skip')
+    this_tests('na')
    # Snippet taken from applications.ipynb
    docstr = """## Module structure
In each case (except for `collab`), the module is organized this way:
@@ -98,7 +98,7 @@ def test_application_links_top_level_modules():
    assert_link(docstr, expected, msg='data, models should link to highest module. transform and learner links to first match')

def test_link_vision_learner_priority():
-    this_tests('skip')
+    this_tests('na')
    # Edge case for vision.learner.ipynb
    imports = """from fastai.gen_doc.nbdoc import *
from fastai.vision import *
4 changes: 2 additions & 2 deletions tests/test_utils_mod_independency.py
@@ -76,7 +76,7 @@ def setup(**kw):

# just test first that the parsing worked
def test_setup_parser():
-    this_tests('skip')
+    this_tests('na')
    assert data['name'] == 'fastai'

# print(data['extras_require'])
@@ -101,7 +101,7 @@ def find_spec(self, fullname, path, target=None):
import pytest
@pytest.mark.skip("Currently broken test")
def test_unwanted_mod_dependencies():
-    this_tests('skip')
+    this_tests('na')
    # save the original state
    mod_saved = sys.modules['fastai'] if 'fastai' in sys.modules else None
    meta_path_saved = sys.meta_path.copy
6 changes: 3 additions & 3 deletions tests/test_vision_data.py
@@ -100,18 +100,18 @@ def test_image_resize(path, path_var_size):
check_resized(data, size, args)

def test_multi_iter_broken(path):
-    this_tests('skip')
+    this_tests('na')
    data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    for i in range(2): x,y = next(iter(data.train_dl))

def test_multi_iter(path):
-    this_tests('skip')
+    this_tests('na')
    data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    data.normalize()
    for i in range(2): x,y = data.one_batch()

def test_clean_tear_down(path):
-    this_tests('skip')
+    this_tests('na')
    docstr = "test DataLoader iter doesn't get stuck"
    data = ImageDataBunch.from_folder(path, ds_tfms=(rand_pad(2, 28), []))
    data.normalize()
