Commit 36e4f78

modernized tests

xrotwang committed Apr 21, 2021
1 parent b8acfa7 commit 36e4f78
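The pattern applied throughout: pytest's legacy tmpdir fixture (a py.path.local) is replaced by tmp_path (a pathlib.Path), which drops the str() coercions and .join() calls. A minimal sketch of the before/after, with hypothetical test names rather than code from this commit:

    import pathlib

    # Before: py.path.local API; str() needed wherever a plain path is expected.
    def test_example_old(tmpdir):
        fname = str(tmpdir.join('dump.json'))
        pathlib.Path(fname).write_text('{}', encoding='utf8')

    # After: tmp_path is already a pathlib.Path; use the / operator directly.
    def test_example_new(tmp_path):
        (tmp_path / 'dump.json').write_text('{}', encoding='utf8')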
Showing 10 changed files with 51 additions and 73 deletions.
4 changes: 2 additions & 2 deletions tests/conftest.py

@@ -16,8 +16,8 @@


 @pytest.fixture
-def git_repo(tmpdir):
-    return get_test_repo(str(tmpdir), remote_url='https://github.com/lexibank/dataset.git')
+def git_repo(tmp_path):
+    return get_test_repo(tmp_path, remote_url='https://github.com/lexibank/dataset.git')


 @pytest.fixture
43 changes: 15 additions & 28 deletions tests/test_commands.py

@@ -1,6 +1,5 @@
 import shlex
 import logging
-import pathlib
 import argparse

 import pytest
@@ -24,12 +23,10 @@ def _main(cmd, **kw):
     main(['--no-config'] + shlex.split(cmd), **kw)


-def test_makecldf_concepticon_concepts(repos, tmpdir):
+def test_makecldf_concepticon_concepts(repos):
     d = repos / 'datasets' / 'test_dataset_concepticon_concepts'
     _main('lexibank.makecldf {0} --glottolog {1} --concepticon {1} --clts {1}'.format(
-        str(d / 'tdcc.py'),
-        str(repos),
-    ))
+        d / 'tdcc.py', repos))
     assert d.joinpath('cldf', 'parameters.csv').read_text(encoding='utf8').splitlines()[0] == \
         'ID,Name,Concepticon_ID,Concepticon_Gloss,NUMBER,ENGLISH,CHINESE,PAGE'

@@ -50,7 +47,7 @@ def test_makecldf_multi_profiles(repos):
     assert 'FREQUENCY' in (d / 'etc' / 'orthography' / 'p1.tsv').read_text(encoding='utf8')


-def test_makecldf(repos, dataset, dataset_cldf, dataset_no_cognates, sndcmp, tmpdir, capsys):
+def test_makecldf(repos, dataset, dataset_cldf, dataset_no_cognates, sndcmp, capsys, tmp_path):
     _main('lexibank.makecldf {0} --glottolog {1} --concepticon {1} --clts {1}'.format(
         str(dataset.dir / 'td.py'),
         str(repos),
@@ -89,7 +86,7 @@ def test_makecldf(repos, dataset, dataset_cldf, dataset_no_cognates, sndcmp, tmpdir, capsys):
         str(dataset_no_cognates.dir / 'tdn.py'),
         str(repos),
         str(repos),
-        str(tmpdir.join('db')),
+        str(tmp_path / 'db'),
     ))


@@ -108,25 +105,16 @@ def test_check_lexibank(dataset_cldf, caplog):
     assert any('Cross-concept' in w for w in warnings)


-def test_ls(repos, tmpdir, dataset):
+def test_ls(repos, tmp_path, dataset):
     _main('lexibank.load --db {3} {0} --glottolog {1} --concepticon {2}'.format(
-        str(dataset.dir / 'td.py'),
-        str(repos),
-        str(repos),
-        str(tmpdir.join('db')),
-    ))
-    _main('lexibank.ls {0} --all --db {1}'.format(
-        str(dataset.dir / 'td.py'),
-        str(tmpdir.join('db'))))
-    _main('lexibank.unload --db {1} {0}'.format(
-        str(dataset.dir / 'td.py'),
-        str(tmpdir.join('db')),
-    ))
+        dataset.dir / 'td.py', repos, repos, tmp_path / 'db'))
+    _main('lexibank.ls {0} --all --db {1}'.format(dataset.dir / 'td.py', tmp_path / 'db'))
+    _main('lexibank.unload --db {1} {0}'.format(dataset.dir / 'td.py', tmp_path / 'db'))


-def test_db(tmpdir, mocker):
+def test_db(tmp_path, mocker):
     mocker.patch('pylexibank.commands.db.subprocess', mocker.Mock(return_value=0))
-    _main('lexibank.db --db {0}'.format(str(tmpdir.join('db'))))
+    _main('lexibank.db --db {0}'.format(tmp_path / 'db'))


 def test_check_phonotactics(dataset):
@@ -151,13 +139,12 @@ def test_readme(dataset, repos):
     assert '# Contributors' in dataset.dir.joinpath('README.md').read_text(encoding='utf8')


-def test_new(tmpdir, mocker):
+def test_new(tmp_path, mocker):
     mocker.patch('cldfbench.metadata.input', mocker.Mock(return_value='abc'))
-    _main('new --template lexibank_simple --out ' + str(tmpdir))
-    assert pathlib.Path(str(tmpdir)).joinpath('abc', 'CONTRIBUTORS.md').exists()
+    _main('new --template lexibank_simple --out ' + str(tmp_path))
+    assert tmp_path.joinpath('abc', 'CONTRIBUTORS.md').exists()

     mocker.patch('cldfbench.metadata.input', mocker.Mock(return_value='cde'))
-    _main('new --template lexibank_combined --out ' + str(tmpdir))
-    assert '{{' not in pathlib.Path(str(tmpdir)).joinpath(
-        'cde', 'lexibank_cde.py').read_text(encoding='utf8')
+    _main('new --template lexibank_combined --out ' + str(tmp_path))
+    assert '{{' not in tmp_path.joinpath('cde', 'lexibank_cde.py').read_text(encoding='utf8')

5 changes: 2 additions & 3 deletions tests/test_dataset.py

@@ -1,7 +1,6 @@
 import sys
 import json
 import importlib
-import pathlib

 import pytest
 from clldutils.path import sys_path
@@ -43,8 +42,8 @@ class Test(Dataset):
         Test()


-def test_Dataset_tokenizer(tmpdir):
-    etc = pathlib.Path(str(tmpdir)).joinpath('etc')
+def test_Dataset_tokenizer(tmp_path):
+    etc = tmp_path / 'etc'
     etc.mkdir()
     orth_dir = etc.joinpath('orthography')
     orth_dir.mkdir()
14 changes: 8 additions & 6 deletions tests/test_db.py

@@ -4,6 +4,11 @@
 from pylexibank.db import Database, ColSpec, schema


+@pytest.fixture
+def db(tmp_path):
+    return Database(tmp_path / 'lexibank.sqlite')
+
+
 def test_ColSpec():
     col = ColSpec(name='c', csvw_type='float')
     assert col.convert(5) == '5'
@@ -24,8 +29,7 @@ def test_schema(cldf_dataset):
         assert False


-def test_db(tmpdir, dataset, mocker, capsys):
-    db = Database(str(tmpdir.join('lexibank.sqlite')))
+def test_db(dataset, mocker, capsys, db):
     db.load(dataset)
     db.create(exists_ok=True)
     with pytest.raises(ValueError):
@@ -58,8 +62,7 @@ def test_db(tmpdir, dataset, mocker, capsys):
     db.load(dataset)


-def test_db_multiple_datasets(tmpdir, dataset, dataset_cldf, dataset_cldf_capitalisation, capsys):
-    db = Database(str(tmpdir.join('lexibank.sqlite')))
+def test_db_multiple_datasets(dataset, dataset_cldf, dataset_cldf_capitalisation, capsys, db):
     db.load(dataset)
     db.load(dataset_cldf, verbose=True)
     db.load(dataset_cldf_capitalisation, verbose=True)
@@ -71,11 +74,10 @@ def test_db_multiple_datasets(tmpdir, dataset, dataset_cldf, dataset_cldf_capitalisation, capsys):
     assert ('1', 'abc') in [(r[0], r[1]) for r in res]


-def test_db_multiple_datasets_error(tmpdir, dataset, dataset_factory):
+def test_db_multiple_datasets_error(dataset, dataset_factory, db):
     import shutil
     from clldutils.jsonlib import load, dump

-    db = Database(str(tmpdir.join('lexibank.sqlite')))
     assert not db.fname.exists()
     db.load(dataset)

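The new db fixture centralizes the Database setup that each test previously repeated inline; pytest hands every test its own Database backed by a throw-away SQLite file under tmp_path. Roughly how a consumer looks, with a hypothetical test name (the fixture itself is verbatim from this commit):

    import pytest
    from pylexibank.db import Database

    @pytest.fixture
    def db(tmp_path):
        # A fresh Database per test, backed by a temporary SQLite file.
        return Database(tmp_path / 'lexibank.sqlite')

    def test_uses_db(dataset, db):  # hypothetical consumer of the fixture
        db.load(dataset)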
8 changes: 4 additions & 4 deletions tests/test_profile.py

@@ -44,8 +44,8 @@ def test_augment(clts):
     assert prf.graphemes['^a']['SCA'] == 'S'


-def test_write(tmpdir):
-    fname = pathlib.Path(str(tmpdir)) / 'profile.tsv'
+def test_write(tmp_path):
+    fname = tmp_path / 'profile.tsv'
     prf = Profile({'Grapheme': 'ab', 'IPA': 'z'}, {'Grapheme': 'x', 'IPA': 'y'})
     prf.write(fname)
     assert Profile.from_file(fname).graphemes == prf.graphemes
@@ -62,8 +62,8 @@ def test_clean(clts):
     assert prf.graphemes['b']['IPA'] == 'b/tɬ'


-def test_check(caplog, tmpdir, clts):
-    prf_path = pathlib.Path(str(tmpdir)) / 'profile.tsv'
+def test_check(caplog, tmp_path, clts):
+    prf_path = tmp_path / 'profile.tsv'

     prf_path.write_text('Grapheme\tIPA\na\tx\na\tx\n')
     prf = Profile.from_file(prf_path)
8 changes: 3 additions & 5 deletions tests/test_providers_abvd.py

@@ -1,20 +1,18 @@
 import pytest
-from pathlib import Path

 from clldutils.path import copytree
 from pycldf.sources import Source

 from pylexibank.providers import abvd


 @pytest.fixture
-def abvd_dataset(repos, tmpdir, glottolog, concepticon):
-
-    copytree(repos / 'datasets' / 'abvd', str(tmpdir.join('abvd')))
+def abvd_dataset(repos, tmp_path, glottolog, concepticon):
+    copytree(repos / 'datasets' / 'abvd', tmp_path / 'abvd')

     class Dataset(abvd.BVD):
         id = 'x'
         SECTION = 'y'
-        dir = Path(str(tmpdir.join('abvd')))
+        dir = tmp_path / 'abvd'

     return Dataset(glottolog=glottolog, concepticon=concepticon)
6 changes: 2 additions & 4 deletions tests/test_providers_clld.py

@@ -1,9 +1,7 @@
-import pathlib
-
 from pylexibank.providers.clld import CLLD


-def test_CLLD(mocker, repos, tmpdir):
+def test_CLLD(mocker, repos, tmp_path):
     class Response(mocker.Mock):
         def iter_content(self, *args, **kw):
             print(repos.joinpath('wold_dataset.cldf.zip'))
@@ -12,7 +10,7 @@ def iter_content(self, *args, **kw):

     class WOLD(CLLD):
         id = 'wold'
-        dir = pathlib.Path(str(tmpdir))
+        dir = tmp_path

     ds = WOLD()
     assert ds.url()
26 changes: 10 additions & 16 deletions tests/test_providers_sndcmp.py

@@ -15,15 +15,18 @@


 @pytest.fixture
-def sndcmp_dataset(repos, tmpdir, glottolog, concepticon):
+def sndcmp_dir(tmp_path, repos):
+    copytree(repos / 'datasets' / 'sndcmp', tmp_path / 'sndcmp')
+    return tmp_path / 'sndcmp'

-    copytree(repos / 'datasets' / 'sndcmp', str(tmpdir.join('sndcmp')))

+@pytest.fixture
+def sndcmp_dataset(repos, glottolog, concepticon, sndcmp_dir):
     class CustomConcept(SNDCMPConcept):
         Bislama_Gloss = attr.ib(default=None)

     class Dataset(SNDCMP):
-        dir = str(tmpdir.join('sndcmp'))
+        dir = sndcmp_dir
         id = "sndcmpvanuatu"
         study_name = "Vanuatu"
         second_gloss_lang = "Bislama"
@@ -40,12 +43,9 @@ def get_source_id_array(self, lexeme):


 @pytest.fixture
-def sndcmp2_dataset(repos, tmpdir, glottolog, concepticon):
-
-    copytree(repos / 'datasets' / 'sndcmp', str(tmpdir.join('sndcmp')))
-
+def sndcmp2_dataset(repos, glottolog, concepticon, sndcmp_dir):
     class Dataset(SNDCMP):
-        dir = str(tmpdir.join('sndcmp'))
+        dir = sndcmp_dir
         id = "sndcmpvanuatu"
         study_name = "Vanuatu"
         source_id_array = ["Shimelman2019"]
@@ -55,12 +55,9 @@ class Dataset(SNDCMP):


 @pytest.fixture
-def sndcmp_dl_dataset(repos, tmpdir, glottolog, concepticon):
-
-    copytree(repos / 'datasets' / 'sndcmp', str(tmpdir.join('sndcmp')))
-
+def sndcmp_dl_dataset(repos, glottolog, concepticon, sndcmp_dir):
     class Dataset(SNDCMP):
-        dir = str(tmpdir.join('sndcmp'))
+        dir = sndcmp_dir
         id = "sndcmpbrazil"
         study_name = "Brazil"
         second_gloss_lang = None
@@ -71,7 +68,6 @@ class Dataset(SNDCMP):


 def test_sndcmp(sndcmp_dataset, mocker):
-
     sndcmp_dataset.cmd_create_ref_etc_files(mocker.MagicMock())
     assert (sndcmp_dataset.raw_dir / 'languages.csv').exists()
     assert (sndcmp_dataset.raw_dir / 'concepts.csv').exists()
@@ -83,7 +79,6 @@ def test_sndcmp(sndcmp_dataset, mocker):


 def test_sndcmp2(sndcmp2_dataset, mocker):
-
     sndcmp2_dataset.cmd_create_ref_etc_files(mocker.MagicMock())
     csv = sndcmp2_dataset.raw_dir / 'concepts.csv'
     res = list(reader(csv, dicts=True))
@@ -96,7 +91,6 @@ def test_sndcmp_cldf(sndcmp_dataset, mocker):


 def test_sndcmp_dl(sndcmp_dl_dataset, mocker):
-
     class Requests(mocker.Mock):
         def get(self, *args, **kw):
             if 'zip' in args[0]:
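Here the three dataset fixtures stop copying the sndcmp tree themselves and instead depend on a shared sndcmp_dir fixture; pytest resolves the fixture-to-fixture dependency and runs the copy once per test. A condensed sketch of the pattern, assuming SNDCMP is importable from pylexibank.providers.sndcmp as the test module's imports suggest:

    import pytest
    from clldutils.path import copytree
    from pylexibank.providers.sndcmp import SNDCMP

    @pytest.fixture
    def sndcmp_dir(tmp_path, repos):
        # Copy the raw test data into a per-test directory exactly once.
        copytree(repos / 'datasets' / 'sndcmp', tmp_path / 'sndcmp')
        return tmp_path / 'sndcmp'

    @pytest.fixture
    def sndcmp_dataset(glottolog, concepticon, sndcmp_dir):
        class Dataset(SNDCMP):
            dir = sndcmp_dir  # a pathlib.Path now serves as the dataset dir
            id = "sndcmpvanuatu"
            study_name = "Vanuatu"

        return Dataset(glottolog=glottolog, concepticon=concepticon)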
6 changes: 3 additions & 3 deletions tests/test_providers_tob.py

@@ -16,14 +16,14 @@
     '</div>'


-def test_TOB(tmpdir, mocker, concepticon, glottolog):
+def test_TOB(tmp_path, mocker, concepticon, glottolog):
     class DS(TOB):
-        dir = Path(str(tmpdir))
+        dir = tmp_path
         id = 'test'
         name = 'name'
         dset = 'dset'

-    tmpdir.join('metadata.json').write_text('{"conceptlist": "Wang-2004-471"}', encoding='utf8')
+    tmp_path.joinpath('metadata.json').write_text('{"conceptlist": "Wang-2004-471"}', encoding='utf8')

     class Requests(mocker.Mock):
         def get(self, *args, **kw):
4 changes: 2 additions & 2 deletions tests/test_util.py

@@ -18,8 +18,8 @@ def test_iter_repl(seq, subseq, repl, out):
     assert list(util.iter_repl(seq, subseq, repl)) == out


-def test_jsondump(tmpdir):
-    fname = str(tmpdir.join('dump.json'))
+def test_jsondump(tmp_path):
+    fname = tmp_path / 'dump.json'
     res = util.jsondump({'a': 2}, fname)
     assert 'a' in res
     res = util.jsondump({'b': 3}, fname)
