From 2e16eedd860f458c60a174534539d78894ab3531 Mon Sep 17 00:00:00 2001 From: Micah Johnson Date: Tue, 22 Sep 2020 21:38:34 -0600 Subject: [PATCH 01/21] Removed unittests. Used pytests. --- requirements_dev.txt | 1 + tests/test_changes.py | 8 +------- tests/test_checkers.py | 7 ++----- tests/test_cli.py | 10 +++------- tests/test_config.py | 22 +++++++++------------- tests/test_entries.py | 8 +------- tests/test_output.py | 12 +++--------- tests/test_parsing.py | 20 ++++++++------------ tests/test_tools.py | 17 ++++++----------- tests/test_utilities.py | 32 +++++++++++++------------------- 10 files changed, 47 insertions(+), 90 deletions(-) diff --git a/requirements_dev.txt b/requirements_dev.txt index 684a24c..0f83858 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -8,3 +8,4 @@ sphinx-rtd-theme==0.4.3 sphinx==1.8.5 sphinxcontrib-apidoc==0.3.0 twine==1.13.0 +pytest==6.0.2 diff --git a/tests/test_changes.py b/tests/test_changes.py index 089d64f..d804f25 100644 --- a/tests/test_changes.py +++ b/tests/test_changes.py @@ -8,13 +8,12 @@ """ import os -import unittest from inicheck.changes import * from inicheck.config import MasterConfig, UserConfig -class TestChanges(unittest.TestCase): +class TestChanges(): def test_valid_syntax(self): """ @@ -30,8 +29,3 @@ def test_valid_syntax(self): assert True except Exception as e: assert False - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) diff --git a/tests/test_checkers.py b/tests/test_checkers.py index 6ca4b37..56d1e21 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -7,14 +7,11 @@ Tests for `inicheck.checkers` module. 
""" -import unittest - import inicheck from inicheck.checkers import * from inicheck.config import MasterConfig, UserConfig - -class TestCheckers(unittest.TestCase): +class TestCheckers(): def run_a_checker(self, valids, invalids, checker, section='basic', item='item', @@ -60,7 +57,7 @@ def run_a_checker(self, valids, invalids, checker, section='basic', assert not valid @classmethod - def setUpClass(self): + def setup_class(self): """ Create some key structures for testing """ diff --git a/tests/test_cli.py b/tests/test_cli.py index f5019e9..fbce254 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,7 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -import unittest import sys import re from os.path import dirname, abspath, join @@ -11,9 +10,10 @@ from .test_output import capture_print -class TestCLI(unittest.TestCase): +class TestCLI(): + @classmethod - def setUpClass(cls): + def setup_class(cls): cls.test_base = abspath(join(dirname(__file__), 'test_configs')) cls.master = [ join(cls.test_base, 'recipes.ini'), @@ -105,7 +105,3 @@ def test_version(self): '(exception|error)', str(current_version()), re.IGNORECASE ) assert exception_message is None - - -if __name__ == '__main__': - sys.exit(unittest.main()) diff --git a/tests/test_config.py b/tests/test_config.py index 8213b36..b550bfc 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -9,7 +9,7 @@ import datetime import os -import unittest +import pytest from inicheck.config import * from inicheck.tools import cast_all_variables, get_checkers @@ -67,10 +67,10 @@ def compare_config(generated_config, truth_config, return True -class TestUserConfig(unittest.TestCase): +class TestUserConfig(): @classmethod - def setUpClass(self): + def setup_class(self): """ """ fname = os.path.abspath( @@ -115,9 +115,9 @@ def test_apply_recipes(self): assert v in [r.name for r in self.ucfg.recipes] -class TestRecipes(unittest.TestCase): +class TestRecipes(): - def setUp(self): + def setup_method(self): 
self.fname = os.path.abspath(os.path.dirname(__file__) + '/test_configs/full_config.ini') @@ -233,8 +233,8 @@ def test_apply_defaults_for_has_value(self): assert 'dk_ncores' not in self.ucfg.cfg['precip'].keys() -class TestMasterConfig(unittest.TestCase): - def setUp(self): +class TestMasterConfig(): + def setup_method(self): """ Stage our truthing data here """ @@ -306,13 +306,9 @@ def test_check_types(self): parseable_line=line)}} # invalids if z == 0: - self.assertRaises(ValueError, check_types, cfg, checkers) + with pytest.raises(ValueError): + check_types(cfg, checkers) # valids else: assert check_types(cfg, checkers) - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) diff --git a/tests/test_entries.py b/tests/test_entries.py index 1256f76..0e70308 100644 --- a/tests/test_entries.py +++ b/tests/test_entries.py @@ -7,13 +7,12 @@ Tests for `inicheck.entries` module. """ -import unittest from collections import OrderedDict from inicheck.entries import ConfigEntry, RecipeSection, TriggerEntry -class TestEntries(unittest.TestCase): +class TestEntries(): def test_trigger_entry(self): """ Tests to see if we correctly gather a trigger for a recipe @@ -71,8 +70,3 @@ def test_config(self): assert e.default == ['swe_z'] assert e.type == 'string' assert e.listed == True - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) diff --git a/tests/test_output.py b/tests/test_output.py index 73ad1fd..eba200e 100644 --- a/tests/test_output.py +++ b/tests/test_output.py @@ -10,7 +10,6 @@ import io import os import shutil -import unittest from collections import OrderedDict from contextlib import redirect_stdout @@ -38,16 +37,16 @@ def capture_print(function_call, *args, **kwargs): return out -class TestOutput(unittest.TestCase): +class TestOutput(): @classmethod - def setUpClass(self): + def setup_class(self): base = os.path.dirname(__file__) self.ucfg = get_user_config(os.path.join(base, "test_configs/full_config.ini"), modules="inicheck") 
@classmethod - def tearDownClass(self): + def teardown_Class(self): """ Delete any files """ @@ -109,8 +108,3 @@ def test_non_default_print(self): # Check that we have 27 lines of info for non-defaults assert len(out.split('\n')) == 27 - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) diff --git a/tests/test_parsing.py b/tests/test_parsing.py index 1c12d37..a783cd9 100644 --- a/tests/test_parsing.py +++ b/tests/test_parsing.py @@ -6,13 +6,12 @@ Tests for `inicheck.iniparse` module. """ - -import unittest +import pytest from inicheck.iniparse import * -class TestIniparse(unittest.TestCase): +class TestIniparse(): def test_parse_sections(self): """ @@ -55,10 +54,12 @@ def test_parse_sections(self): == ["options = [a:10]", "b:5"]) # Catch non-comment chars before the first section - self.assertRaises(Exception, parse_sections, ['a#', '#', '[test]']) + with pytest.raises(Exception): + parse_sections(['a#', '#', '[test]']) + with pytest.raises(Exception): # Catch repeat sections in config - self.assertRaises(Exception, parse_sections, ['[test]', '#', '[test]']) + parse_sections(['[test]', '#', '[test]']) def test_parse_items(self): """ @@ -120,10 +121,5 @@ def test_parse_changes(self): assert changes[1][1][0] == "removed" # Test syntax errors - d = ["section/item > new_section/item"] - self.assertRaises(ValueError, parse_changes, d) - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) + with pytest.raises(ValueError): + parse_changes(["section/item > new_section/item"]) diff --git a/tests/test_tools.py b/tests/test_tools.py index 3bab1f0..1363049 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -7,15 +7,15 @@ Tests for `inicheck.Tools` module. 
""" -import unittest from collections import OrderedDict +import pytest from inicheck.tools import * -class TestTools(unittest.TestCase): +class TestTools(): @classmethod - def setUpClass(self): + def setup_class(self): base = os.path.dirname(__file__) self.ucfg = get_user_config(os.path.join(base, "test_configs/full_config.ini"), @@ -81,10 +81,10 @@ def test_get_user_config(self): path = os.path.join(base, "test_configs/full_config.ini") # check for the Exception - with self.assertRaises(IOError): + with pytest.raises(IOError): get_user_config(path) - with self.assertRaises(IOError): + with pytest.raises(IOError): get_user_config('not_a_file.ini') ucfg = get_user_config(path, modules='inicheck') @@ -95,7 +95,7 @@ def test_config_documentation(self): Confirms that we still make config documentation """ # Confirm exception when file doesnt exist - with self.assertRaises(IOError): + with pytest.raises(IOError): f = '/no/folder/exists/f.rst' config_documentation(f, modules='inicheck') @@ -106,8 +106,3 @@ def test_config_documentation(self): # Clean up os.remove(f) - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 7610227..46dfa04 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -6,16 +6,15 @@ Tests for `inicheck.utilities` module. 
""" - -import unittest +import pytest from inicheck.tools import get_user_config from inicheck.utilities import * -class TestUtilities(unittest.TestCase): +class TestUtilities(): @classmethod - def setUpClass(self): + def setup_class(self): base = os.path.dirname(__file__) self.ucfg = get_user_config( os.path.join(base,"test_configs/full_config.ini"), @@ -82,7 +81,7 @@ def test_find_options_in_recipes(self): # Currently there is 3 sections that are set as optional in the recipes for opt in ['gridded', 'mysql', 'csv']: - self.assertTrue(opt in choices[0]) + assert opt in choices[0] def test_get_relative_to_cfg(self): """ @@ -168,45 +167,40 @@ def test_string_date_only(self): Test the parse_date function which is used in the checks for datetime """ value = parse_date("2019-10-1") - self.assertEqual(datetime(2019, 10, 1), value) + assert datetime(2019, 10, 1) == value # Test for odd issue that came up with casting a value twice value = parse_date("2019-10-1 10:00") value = parse_date(value) - self.assertEqual(datetime(2019, 10, 1, 10), value) + assert datetime(2019, 10, 1, 10) == value def test_string_with_tz_info_in_utc(self): value = parse_date("2019-10-1 10:00 MST") - self.assertEqual(datetime(2019, 10, 1, 17), value) + assert datetime(2019, 10, 1, 17) == value def test_tz_unaware_return(self): value = parse_date("2019-10-1 10:00") - self.assertIsNone(value.tzinfo) + assert value.tzinfo is None def test_tz_unaware_return_with_tz_info_given(self): value = parse_date("2019-10-1 10:00 MST") - self.assertIsNone(value.tzinfo) + assert value.tzinfo is None def test_parse_date_datetime(self): to_convert = datetime(2019, 10, 1) value = parse_date(to_convert) - self.assertEqual(value, to_convert) + assert value == to_convert def test_parse_date_date(self): to_convert = date(2019, 10, 1) expected = datetime(2019, 10, 1) value = parse_date(to_convert) - self.assertEqual(value, expected) + assert value == expected def test_parse_date_fails_int(self): - with 
self.assertRaises(TypeError): + with pytest.raises(TypeError): parse_date(10) def test_parse_date_fails_with_unknown_string(self): - with self.assertRaises(TypeError): + with pytest.raises(TypeError): parse_date("10 F") - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) From d85859c43f9078003813bb93e6ae8eb8d281bc0f Mon Sep 17 00:00:00 2001 From: Micah Johnson Date: Tue, 22 Sep 2020 22:24:46 -0600 Subject: [PATCH 02/21] Parametrized test_utilities.py --- tests/test_utilities.py | 295 +++++++++++++++++++--------------------- 1 file changed, 143 insertions(+), 152 deletions(-) diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 46dfa04..ebe925f 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -7,161 +7,152 @@ Tests for `inicheck.utilities` module. """ import pytest - +from os.path import join, dirname from inicheck.tools import get_user_config from inicheck.utilities import * - -class TestUtilities(): - @classmethod - def setup_class(self): - base = os.path.dirname(__file__) - self.ucfg = get_user_config( - os.path.join(base,"test_configs/full_config.ini"), - modules="inicheck" - ) - - def test_remove_comments(self): - """ - Test our remove comments code - """ - values = {"test": "test#comment", - "test1": "test1;comment", - "": ";full in line comment", - "testboth": "testboth; test a comment with both " - "types of # comments "} - for k, v in values.items(): - out = remove_comment(v) - - assert k == out - - def test_mk_lst(self): - """ - Test all the ways we use mk_lst - - Case A: Convert non-list to list and keep lists as lists - Case B: Convert lists to single value and keep singles as single - Case C: Don't undo lists that are acutal lists - - """ - - # Case A - for v in ['test', ['test']]: - out = mk_lst(v, unlst=False) - assert isinstance(out, list) - - # Case B - for v in ['test', ['test']]: - out = mk_lst(v, unlst=True) - assert not isinstance(out, list) - - # Case C - for v in [['test', 'test2']]: - 
out = mk_lst(v, unlst=True) - assert isinstance(out, list) - - def test_remove_chars(self): - """ - Test if we can remove the problematic chars - """ - out = remove_chars("\t\n my_test\t", "\t\n ") - assert '\n' not in out - assert '\t' not in out - assert ' ' not in out - - def test_find_options_in_recipes(self): - """ - Tests utilities.find_options_in_recipes which extracts choices being - made by looking at the recipes and determining which work on each other - such that they don't exist at the same time. Used in the inimake cli - """ - mcfg = self.ucfg.mcfg - choices = find_options_in_recipes(mcfg.recipes, mcfg.cfg.keys(), - "remove_section") - - # Currently there is 3 sections that are set as optional in the recipes - for opt in ['gridded', 'mysql', 'csv']: - assert opt in choices[0] - - def test_get_relative_to_cfg(self): - """ - Tests that all paths in a config can be made relative to the config - """ - p = get_relative_to_cfg(__file__, self.ucfg.filename) - assert p == '../test_utilities.py' - - def test_is_kw_matched(self): - """ - Tests utilities.is_kw_matched which is used to match items in - a config items list - """ - - # Finds test in string - assert is_kw_matched("tests_my_kw", ['tests']) - - # Finds all three in the string - assert is_kw_matched("tests_my_kw", ['tests', 'my', 'kw'], kw_count=3) - - # Finds all three in the string - assert is_kw_matched("te_my_kw", ['tests', 'my', 'kw'], kw_count=1) - - # No match at all - assert not is_kw_matched("te_ym_k", ['tests', 'my', 'kw']) - - def test_get_kw_match(self): - """ - Tests utilities.get_kw_match which looks through a list of strings with - potential matches of the keywords - """ - - test_lst = ['test_my_str', 'test_my_float', 'test_my_flowt'] - - # should find the first one only - t = get_kw_match(test_lst, ['test'], kw_count=1) - assert t == 'test_my_str' - - t = get_kw_match(test_lst, ['test', 'float'], kw_count=2) - assert t == 'test_my_float' - - def test_is_valid(self): - """ - Tests 
utilities.is_valid which is grossly used in the checkers that are - simple enough to pass a function to. - """ - - # Check to can handle a normal scenario - result = is_valid('10.0', float, 'float', allow_none=False) - assert result[1] is None - assert result[0] - - # Test the handling of Nones - for b in [False, True]: - result = is_valid(None, float, 'float', allow_none=b)[0] - assert b == result - - # Check to see that we return an error message - result = is_valid(None, float, 'float', allow_none=False)[1] - assert 'Value' in result - assert 'None' in result - - # Check to see that we return an error message - result = is_valid('abc', float, 'float', allow_none=False)[1] - assert 'float' in result - assert 'str' in result - - def test_get_inicheck_cmd(self): - """ - Test if the cmd used to generate the str command is working - - """ - cmd = get_inicheck_cmd( - self.ucfg.filename, - modules='inicheck', - master_files=None) - assert cmd == 'inicheck -f {} -m inicheck'.format(self.ucfg.filename) - - -class TestUtilitiesDateParse(TestUtilities): +@pytest.mark.parametrize("value, expected", [ +('test#comment', 'test'), +("test1;comment", 'test1'), +(";full in line comment", ''), +("testboth; test a comment with both types of # comments ", 'testboth'), +]) +def test_remove_comments(value, expected): + """ + Test our remove comments code + """ + out = remove_comment(value) + assert out == expected + + +@pytest.mark.parametrize("value, unlist, expected",[ +# Test we make things into lists +('test', False, True), +(['test'], False, True), +# Test we can take things out of a list +('test', True, False), +(['test'], True, False), +(['test'], True, False), +# Test we don't take lists of len > 1 out of a list +(['test', 'test2'], True, True) +]) +def test_mk_lst(value, unlist, expected): + """ + Test all the ways we use mk_lst + """ + out = mk_lst(value, unlst=unlist) + assert isinstance(out, list) == expected + + +def test_remove_chars(): + """ + Test if we can remove the 
problematic chars + """ + out = remove_chars("\t\n my_test\t", "\t\n ") + assert '\n' not in out + assert '\t' not in out + assert ' ' not in out + + +def test_find_options_in_recipes(): + """ + Tests utilities.find_options_in_recipes which extracts choices being + made by looking at the recipes and determining which work on each other + such that they don't exist at the same time. Used in the inimake cli + """ + base = dirname(__file__) + ucfg = get_user_config(join(base,"test_configs","full_config.ini"), + modules='inicheck') + mcfg = ucfg.mcfg + choices = find_options_in_recipes(mcfg.recipes, mcfg.cfg.keys(), + "remove_section") + + # Currently there is 3 sections that are set as optional in the recipes + for opt in ['gridded', 'mysql', 'csv']: + assert opt in choices[0] + + +def test_get_relative_to_cfg(): + """ + Tests that all paths in a config can be made relative to the config + """ + base = dirname(__file__) + ucfg = get_user_config(join(base,"test_configs","full_config.ini"), + modules='inicheck') + mcfg = ucfg.mcfg + choices = find_options_in_recipes(mcfg.recipes, mcfg.cfg.keys(), + "remove_section") + p = get_relative_to_cfg(__file__, ucfg.filename) + assert p == '../test_utilities.py' + + +@pytest.mark.parametrize('value, kw, count, expected',[ +# Finds test in string +("tests_my_kw", ['tests'], 1, True), +# Finds all three in the string +("tests_my_kw", ['tests', 'my', 'kw'], 3, True), +# Finds all 2 in the string +("te_my_kw", ['tests', 'my', 'kw'], 2, True), +# No match +("te_ym_k", ['tests', 'my', 'kw'], 1, False), +]) +def test_is_kw_matched(value, kw, count, expected): + """ + Tests utilities.is_kw_matched which is used to match items in + a config items list + """ + # Finds test in string + assert is_kw_matched(value, kw, kw_count=count) == expected + +@pytest.mark.parametrize('kw, count, expected',[ +(['test'], 1, 'test_my_str'), +(['test', 'float'], 2, 'test_my_float')]) +def test_get_kw_match(kw, count, expected): + """ + Tests 
utilities.get_kw_match which looks through a list of strings with + potential matches of the keywords + """ + + test_lst = ['test_my_str', 'test_my_float', 'test_my_flowt'] + + # should find the first one only + t = get_kw_match(test_lst, kw, kw_count=count) + assert t == expected + +@pytest.mark.parametrize('value, dtype, allow_none, expected', [ +# Test this is a convertible string +('10.0', float, False, [True, None]), +# Test the allow None kwarg +(None, float, False, [False, "Value cannot be None"]), +(None, float, True, [True, None]), +# Test we report an error +('abc', float, False, [False, 'Expecting float received str']), +]) +def test_is_valid(value, dtype, allow_none, expected): + """ + Tests utilities.is_valid which is grossly used in the checkers that are + simple enough to pass a function to. + """ + result = is_valid(value, dtype, dtype.__name__, allow_none=allow_none) + assert result[0] == expected[0] + assert result[1] == expected[1] + + +def test_get_inicheck_cmd(): + """ + Test if the cmd used to generate the str command is working + + """ + base = dirname(__file__) + fname = join(base,"test_configs","full_config.ini") + cmd = get_inicheck_cmd( + fname, + modules='inicheck', + master_files=None) + assert cmd == 'inicheck -f {} -m inicheck'.format(fname) + + +class TestUtilitiesDateParse(): def test_string_date_only(self): """ Test the parse_date function which is used in the checks for datetime From e9858a5aa332a256e3187761eae9260ddba32a45 Mon Sep 17 00:00:00 2001 From: micah Date: Tue, 29 Sep 2020 10:23:58 -0600 Subject: [PATCH 03/21] Parametrized test_parsing.py --- tests/test_parsing.py | 186 +++++++++++++++++++++--------------------- 1 file changed, 94 insertions(+), 92 deletions(-) diff --git a/tests/test_parsing.py b/tests/test_parsing.py index a783cd9..9cd46d6 100644 --- a/tests/test_parsing.py +++ b/tests/test_parsing.py @@ -1,20 +1,58 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" +''' test_iniparse 
---------------------------------- Tests for `inicheck.iniparse` module. -""" +''' import pytest - +from collections import OrderedDict from inicheck.iniparse import * class TestIniparse(): - def test_parse_sections(self): - """ + def run_parsing_test(self, fn, info, expected, exception): + ''' + Manages testing exceptions and testing dict output + ''' + if type(info) == OrderedDict: + expected = OrderedDict(expected) + + if exception == None: + received = fn(info) + + # Test the dictionaries recieved match the expected + assert received == expected + + else: + with pytest.raises(exception): + received = fn(info) + + @pytest.mark.parametrize('info, expected, exception', [ + # Test a simple parsing + (['[s]','i:v'], {'s':['i:v']}, None), + # Test a section only without items + (['[s]'], {'s':[]}, None), + # Test every section is made lower case + (['[CASE]'], {'case':[]}, None), + # Test space remove before and after section + (['[ spaces ]'], {'spaces':[]}, None), + # Test a single line section/value is parsed + (['[single_line]i:v'], {'single_line':['i:v']}, None), + # Test some comment removal + (['#comment','[s]','i:v'], {'s':['i:v']}, None), + ([';[commented]'], {}, None), + (['[s]','i:v ;comment'], {'s':['i:v']}, None), + # Test exception with repeat sections + (['[test]', '#', '[test]'], {}, Exception), + # Test non-comment chars before the first section + (['a#', '#', '[test]'], {}, Exception), + + ]) + def test_parse_sections(self, info, expected, exception): + ''' Tests our base function used to parse sections before we do any processing @@ -23,103 +61,67 @@ def test_parse_sections(self): * we can handle comments around sections. 
* caps issues - """ - - info = ["# Test our line comments", - "; Test our other line comments", - ";[old section I am not interested in] test line comments on sections", - "[unit]", - "a:10; in line comment", - "[test CASe] # another inline comment test", - "b:5", - "[ spaces ]", - "test:now", - "[single_line_test]a:10", - "[single_line_test_recipe]options = [a:10]", - "b:5"] - - sections = parse_sections(info) - - # Confirm we see a normal section - assert(sections['unit'][0] == "a:10") - - # Confirm we see a section with space caps issues - assert(sections['test case'][0] == "b:5") + ''' + self.run_parsing_test(parse_sections, info, expected, exception) - assert(sections['spaces'][0] == "test:now") - # Confirm we can handle a section and an item in the same line - assert(sections['single_line_test'][0] == "a:10") - assert(sections['single_line_test_recipe'] - == ["options = [a:10]", "b:5"]) + @pytest.mark.parametrize('info, expected, exception', [ + # Test simple config item parsing + (['a:10'], {'a':'10'}, None), + # Test a value thats a list parse with a line return which should simply merge them + (['a:10,','15,20'], {'a':'10, 15, 20'}, None), + # Test interpreting master file properties that span multiple lines + (['a: default=10','options=[10 15 20]'], {'a':'default=10 options=[10 15 20]'}, None), + ]) - # Catch non-comment chars before the first section - with pytest.raises(Exception): - parse_sections(['a#', '#', '[test]']) - - with pytest.raises(Exception): - # Catch repeat sections in config - parse_sections(['[test]', '#', '[test]']) - - def test_parse_items(self): - """ - Tests our base function used to parse items after we read sections but + def test_parse_items(self, info, expected, exception): + ''' + Tests our base function used to parse items after reading sections but before processing values. 
* tests for wrapped lists * tests to remove bracketed lists with comments * tests for properties being added in master files - """ - - info = {"unit": ["a:10"], - "test case": [" a :10,", "15,20"], - 'recipe': ['a: default=10,', - 'options=[10 15 20]']} # Handle wrapped lines - - items = parse_items(info) - - # Check for a normal single entry parse - #print(items['test case']['a']) - assert(items['unit']['a'] == '10') - - # Check for correct interpretation of a wrapped list - assert(items['test case']['a'] == "10, 15, 20") - - # Check for a correct interpretation of item properties for master - # files - assert(items['recipe']['a'] == "default=10, options=[10 15 20]") - - def test_parse_values(self): - """ + ''' + # Assign a section to the info which are always OrderedDict + info = OrderedDict({'s': info}) + expected = {'s': OrderedDict(expected)} + self.run_parsing_test(parse_items, info, expected, exception) + + + @pytest.mark.parametrize('info, expected, exception',[ + # Test parse values that might have excess chars + ('test1,\ttest2, test3', ['test1','test2', 'test3'], None ), + # Test parsing master properties parsing + ( '\tdefault=10,\toptions=[10 15 20]', ['default=10','options=[10 15 20]'], None), + ]) + def test_parse_values(self, info, expected, exception): + ''' test parse values Ensures: - * we can clean up values with list entries - * we can clean up properties we recieve including non-comma sep lists - """ - info = {'unit': {'a': 'test1,\ttest2, test3'}, - 'recipe': {'a': '\tdefault=10,\toptions=[10 15 20]'} - } - - values = parse_values(info) - - # Check we parse a comma list correctly - assert(values['unit']['a'][1] == 'test2') - - # Check we parse a properties list with no commas correctly - assert(values['recipe']['a'][1] == 'options=[10 15 20]') - - def test_parse_changes(self): - """ + * Cleans up values with list entries + * Cleans up properties we recieve including non-comma sep lists + ''' + # Assign info to a place in a dict of dict which 
are all ordered + info = OrderedDict({'s': OrderedDict({'i': info})}) + expected = OrderedDict({'s': OrderedDict({'i': expected})}) + self.run_parsing_test(parse_values, info, expected, exception) + + + @pytest.mark.parametrize('info, expected, exception',[ + # Test interpreting renaming a section + (['section/item -> new_section/item'], [['section','item','any', 'any'], ['new_section','item','any','any']], None), + # Test interpreting renaming an item + (['section/item -> section/new_item'], [['section','item','any', 'any'], ['section','new_item','any','any']], None), + # Tet syntax error + ([ 'section/item > REMOVED'], [], ValueError), + + ]) + def test_parse_changes(self, info, expected, exception): + ''' Tests tha change lof parsing. Ensures we raise a value error for invalid syntax and that valid syntax is parsed correctly - """ - d = ["section/item -> new_section/item", "section/item -> REMOVED"] - - changes = parse_changes(d) - assert changes[0][1][0] == "new_section" - assert changes[1][1][0] == "removed" - - # Test syntax errors - with pytest.raises(ValueError): - parse_changes(["section/item > new_section/item"]) + ''' + # d = ['section/item -> new_section/item', 'section/item -> REMOVED'] + self.run_parsing_test(parse_changes, info, [expected], exception) From fe54b3fc6682a1996fe1a9093c02bec5b91a1a79 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Sun, 24 Jan 2021 09:58:21 -0700 Subject: [PATCH 04/21] Converted from xunit to fixture --- tests/test_checkers.py | 354 ++++++++++++++++++++--------------------- 1 file changed, 176 insertions(+), 178 deletions(-) diff --git a/tests/test_checkers.py b/tests/test_checkers.py index 56d1e21..92e3a5b 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -10,6 +10,7 @@ import inicheck from inicheck.checkers import * from inicheck.config import MasterConfig, UserConfig +import pytest class TestCheckers(): @@ -56,188 +57,185 @@ def run_a_checker(self, valids, invalids, checker, section='basic', else: 
assert not valid - @classmethod - def setup_class(self): - """ - Create some key structures for testing - """ - tests_p = os.path.join(os.path.dirname(inicheck.__file__), '../tests') - self.mcfg = MasterConfig(path=os.path.join(tests_p, - 'test_configs/master.ini')) - - self.ucfg = UserConfig(os.path.join(tests_p, "test_configs/base_cfg.ini"), - mcfg=self.mcfg) - - def test_string(self): - """ - Test we see strings as strings - """ - - # Confirm we these values are valid - valids = ['test'] - self.run_a_checker(valids, [], CheckString, item='username') - - # Confirm that casting a string with uppers will auto produce lowers - self.ucfg.cfg['basic']['username'] = 'Test' - - b = CheckString(config=self.ucfg, section='basic', item='username') - result = b.cast() - assert result == 'test' - - # Check we can capture a single item list for strings - b.is_list = True - result = b.cast() - assert result == ['test'] - - # Check we capture the when alist is passed and were not expecting one - b.is_list = False - result = b.cast() - assert not isinstance(result, list) - - def test_bool(self): - """ - Test we see booleans as booleans - """ - - # Confirm we these values are valid - valids = [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] - invalids = ['Fasle', 'treu'] - self.run_a_checker(valids, invalids, CheckBool, item='debug') - - def test_float(self): - """ - Test we see floats as floats - """ - valids = [-1.5, '2.5'] - invalids = ['tough'] - - self.run_a_checker(valids, invalids, CheckFloat, item='time_out') - - def test_int(self): - """ - Test we see int as ints and not floats - """ - - # Confirm we these values are valid - valids = [10, '2', 1.0] - invalids = ['tough', '1.5', ''] - self.run_a_checker(valids, invalids, CheckInt, item='num_users') - - def test_datetime(self): - """ - Test we see datetime as datetime - """ - - valids = ['2018-01-10 10:10', '10-10-2018', "October 10 2018"] - invalids = ['Not-a-date', 'Wednesday 5th'] - self.run_a_checker(valids, 
invalids, CheckDatetime, item='start_date') - - def test_list(self): - """ - Test our listing methods using lists of dates. - """ - - valids = ['10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019']] - self.run_a_checker(valids, [], CheckDatetime, item='epochs') - - # # We have a list in the config when we don't want one - # self.ucfg.cfg['section']['item'] = ["test", "test2"] - # invalids = ['test'] - # self.run_a_checker([], invalids, CheckFilename, is_list=False) - - def test_directory(self): - """ - Tests the base class for path based checkers - """ - - valids = ["./"] - invalids = ['./somecrazy_location!/'] - self.run_a_checker(valids, invalids, CheckDirectory, item='tmp') - - # ISSUE #44 check for default when string is empty - self.ucfg.cfg.update({'basic': {'tmp': ''}}) - b = CheckDirectory(config=self.ucfg, section='basic', item='tmp') - value = b.cast() - assert os.path.split(value)[-1] == 'temp' - - def test_filename(self): - """ - Tests the base class for path based checkers - """ - # Remember paths are relative to the config - valids = ["../test_checkers.py"] - invalids = ['dumbfilename'] - self.run_a_checker(valids, invalids, CheckFilename, item='log') - - # ISSUE #44 check for default when string is empty - self.ucfg.cfg.update({'basic': {'log': ''}}) - self.ucfg.mcfg.cfg['basic']['log'].default = None - b = CheckFilename(config=self.ucfg, section='basic', item='log') - value = b.cast() - assert value is None - - # ISSUE #44 check for default when string is empty but default is a - # path - self.ucfg.cfg.update({'basic': {'log': ''}}) - self.ucfg.mcfg.cfg['basic']['log'].default = 'log.txt' - b = CheckFilename(config=self.ucfg, section='basic', item='log') - value = b.cast() - assert os.path.split(value)[-1] == 'log.txt' - - def test_url(self): - """ - Test our url checking. 
- """ - valids = ["https://google.com"] - invalids = ["https://micah_subnaught_is_awesome.com"] - self.run_a_checker(valids, invalids, CheckURL, - item='favorite_web_site') - - def test_datetime_ordered_pairs(self): - """ - Tests the ordered datetime pair checker which looks for _start - _end pairs and confirms they occurs in the correct order. - - """ +@pytest.fixture +def check_tester(): + """ + Create some key structures for testing + """ + cls = TestCheckers() + tests_p = os.path.join(os.path.dirname(inicheck.__file__), '../tests') + cls.mcfg = MasterConfig(path=os.path.join(tests_p, + 'test_configs/master.ini')) + + cls.ucfg = UserConfig(os.path.join(tests_p, "test_configs/base_cfg.ini"), + mcfg=cls.mcfg) + return cls + +def test_string(check_tester): + """ + Test we see strings as strings + """ + + # Confirm we these values are valid + valids = ['test'] + check_tester.run_a_checker(valids, [], CheckString, item='username') + + # Confirm that casting a string with uppers will auto produce lowers + check_tester.ucfg.cfg['basic']['username'] = 'Test' + + b = CheckString(config=check_tester.ucfg, section='basic', item='username') + result = b.cast() + assert result == 'test' + + # Check we can capture a single item list for strings + b.is_list = True + result = b.cast() + assert result == ['test'] + + # Check we capture the when alist is passed and were not expecting one + b.is_list = False + result = b.cast() + assert not isinstance(result, list) + +def test_bool(check_tester): + """ + Test we see booleans as booleans + """ + + # Confirm we these values are valid + valids = [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] + invalids = ['Fasle', 'treu'] + check_tester.run_a_checker(valids, invalids, CheckBool, item='debug') + +def test_float(check_tester): + """ + Test we see floats as floats + """ + valids = [-1.5, '2.5'] + invalids = ['tough'] + + check_tester.run_a_checker(valids, invalids, CheckFloat, item='time_out') + +def test_int(check_tester): + """ 
+ Test we see int as ints and not floats + """ + + # Confirm we these values are valid + valids = [10, '2', 1.0] + invalids = ['tough', '1.5', ''] + check_tester.run_a_checker(valids, invalids, CheckInt, item='num_users') + +def test_datetime(check_tester): + """ + Test we see datetime as datetime + """ + + valids = ['2018-01-10 10:10', '10-10-2018', "October 10 2018"] + invalids = ['Not-a-date', 'Wednesday 5th'] + check_tester.run_a_checker(valids, invalids, CheckDatetime, item='start_date') + +def test_list(check_tester): + """ + Test our listing methods using lists of dates. + """ + + valids = ['10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019']] + check_tester.run_a_checker(valids, [], CheckDatetime, item='epochs') + +def test_directory(check_tester): + """ + Tests the base class for path based checkers + """ + + valids = ["./"] + invalids = ['./somecrazy_location!/'] + check_tester.run_a_checker(valids, invalids, CheckDirectory, item='tmp') + + # ISSUE #44 check for default when string is empty + check_tester.ucfg.cfg.update({'basic': {'tmp': ''}}) + b = CheckDirectory(config=check_tester.ucfg, section='basic', item='tmp') + value = b.cast() + assert os.path.split(value)[-1] == 'temp' + +def test_filename(check_tester): + """ + Tests the base class for path based checkers + """ + # Remember paths are relative to the config + valids = ["../test_checkers.py"] + invalids = ['dumbfilename'] + check_tester.run_a_checker(valids, invalids, CheckFilename, item='log') + + # ISSUE #44 check for default when string is empty + check_tester.ucfg.cfg.update({'basic': {'log': ''}}) + check_tester.ucfg.mcfg.cfg['basic']['log'].default = None + b = CheckFilename(config=check_tester.ucfg, section='basic', item='log') + value = b.cast() + assert value is None + + # ISSUE #44 check for default when string is empty but default is a + # path + check_tester.ucfg.cfg.update({'basic': {'log': ''}}) + check_tester.ucfg.mcfg.cfg['basic']['log'].default = 'log.txt' + b = 
CheckFilename(config=check_tester.ucfg, section='basic', item='log') + value = b.cast() + assert os.path.split(value)[-1] == 'log.txt' + +def test_url(check_tester): + """ + Test our url checking. + """ + valids = ["https://google.com"] + invalids = ["https://micah_subnaught_is_awesome.com"] + check_tester.run_a_checker(valids, invalids, CheckURL, + item='favorite_web_site') + +def test_datetime_ordered_pairs(check_tester): + """ + Tests the ordered datetime pair checker which looks for _start + _end pairs and confirms they occurs in the correct order. + + """ + + # Test end dates com after start dates + starts = ["1-01-2019", "2019-10-01", "1998-01-14 15:00:00"] + ends = ["1-02-2019", "2019-10-02", "1998-01-14 19:00:00"] + + invalids_starts = ["01-01-2020", "2020-06-01", "1998-01-14 20:00:00"] + invalids_ends = ["01-01-2018", "2018-10-01", "1998-01-14 10:00:00", ] + + # Check for starts being before the end date + for start, end, error_start, error_end in zip(starts, ends, + invalids_starts, + invalids_ends): + # Check start values are before end values + acfg = {'basic': {'end_date': end}} + check_tester.run_a_checker([start], [error_start], CheckDatetimeOrderedPair, + item="start_date", + extra_config=acfg) - # Test end dates com after start dates - starts = ["1-01-2019", "2019-10-01", "1998-01-14 15:00:00"] - ends = ["1-02-2019", "2019-10-02", "1998-01-14 19:00:00"] - - invalids_starts = ["01-01-2020", "2020-06-01", "1998-01-14 20:00:00"] - invalids_ends = ["01-01-2018", "2018-10-01", "1998-01-14 10:00:00", ] - - # Check for starts being before the end date - for start, end, error_start, error_end in zip(starts, ends, - invalids_starts, - invalids_ends): - # Check start values are before end values - acfg = {'basic': {'end_date': end}} - self.run_a_checker([start], [error_start], CheckDatetimeOrderedPair, - item="start_date", - extra_config=acfg) - - # Check start values are before end values - acfg = {'basic': {'start_date': start}} - self.run_a_checker([end], 
[error_end], CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg) - - # Check start end values are equal error - acfg = {'basic': {'start_date': '2020-10-01'}} - self.run_a_checker(["2020-10-02"], ["2020-10-01"], - CheckDatetimeOrderedPair, + # Check start values are before end values + acfg = {'basic': {'start_date': start}} + check_tester.run_a_checker([end], [error_end], CheckDatetimeOrderedPair, item="end_date", extra_config=acfg) - def test_bounds(self): - """ - MasterConfig options now have max and min values to constrain continuous - types. This tests whether that works - """ - - self.run_a_checker([1.0, 0.0, '0.5'], [1.1, -1.0, '10'], CheckFloat, - item='fraction') + # Check start end values are equal error + acfg = {'basic': {'start_date': '2020-10-01'}} + check_tester.run_a_checker(["2020-10-02"], ["2020-10-01"], + CheckDatetimeOrderedPair, + item="end_date", + extra_config=acfg) + +def test_bounds(check_tester): + """ + MasterConfig options now have max and min values to constrain continuous + types. 
This tests whether that works + """ + + check_tester.run_a_checker([1.0, 0.0, '0.5'], [1.1, -1.0, '10'], CheckFloat, + item='fraction') if __name__ == '__main__': From 8b53cdeed91d88fd0796b87ba8c66602b23b307e Mon Sep 17 00:00:00 2001 From: micah johnson Date: Sun, 24 Jan 2021 10:05:11 -0700 Subject: [PATCH 05/21] isort and pep8 --- tests/test_checkers.py | 43 ++++++++++++++------ tests/test_cli.py | 8 ++-- tests/test_config.py | 2 +- tests/test_parsing.py | 88 +++++++++++++++++++++-------------------- tests/test_tools.py | 2 +- tests/test_utilities.py | 80 +++++++++++++++++++------------------ 6 files changed, 123 insertions(+), 100 deletions(-) diff --git a/tests/test_checkers.py b/tests/test_checkers.py index 92e3a5b..08f015d 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -8,9 +8,10 @@ """ import inicheck +import pytest from inicheck.checkers import * from inicheck.config import MasterConfig, UserConfig -import pytest + class TestCheckers(): @@ -57,6 +58,7 @@ def run_a_checker(self, valids, invalids, checker, section='basic', else: assert not valid + @pytest.fixture def check_tester(): """ @@ -65,12 +67,13 @@ def check_tester(): cls = TestCheckers() tests_p = os.path.join(os.path.dirname(inicheck.__file__), '../tests') cls.mcfg = MasterConfig(path=os.path.join(tests_p, - 'test_configs/master.ini')) + 'test_configs/master.ini')) cls.ucfg = UserConfig(os.path.join(tests_p, "test_configs/base_cfg.ini"), - mcfg=cls.mcfg) + mcfg=cls.mcfg) return cls + def test_string(check_tester): """ Test we see strings as strings @@ -97,6 +100,7 @@ def test_string(check_tester): result = b.cast() assert not isinstance(result, list) + def test_bool(check_tester): """ Test we see booleans as booleans @@ -107,6 +111,7 @@ def test_bool(check_tester): invalids = ['Fasle', 'treu'] check_tester.run_a_checker(valids, invalids, CheckBool, item='debug') + def test_float(check_tester): """ Test we see floats as floats @@ -116,6 +121,7 @@ def test_float(check_tester): 
check_tester.run_a_checker(valids, invalids, CheckFloat, item='time_out') + def test_int(check_tester): """ Test we see int as ints and not floats @@ -126,6 +132,7 @@ def test_int(check_tester): invalids = ['tough', '1.5', ''] check_tester.run_a_checker(valids, invalids, CheckInt, item='num_users') + def test_datetime(check_tester): """ Test we see datetime as datetime @@ -133,7 +140,12 @@ def test_datetime(check_tester): valids = ['2018-01-10 10:10', '10-10-2018', "October 10 2018"] invalids = ['Not-a-date', 'Wednesday 5th'] - check_tester.run_a_checker(valids, invalids, CheckDatetime, item='start_date') + check_tester.run_a_checker( + valids, + invalids, + CheckDatetime, + item='start_date') + def test_list(check_tester): """ @@ -143,6 +155,7 @@ def test_list(check_tester): valids = ['10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019']] check_tester.run_a_checker(valids, [], CheckDatetime, item='epochs') + def test_directory(check_tester): """ Tests the base class for path based checkers @@ -158,6 +171,7 @@ def test_directory(check_tester): value = b.cast() assert os.path.split(value)[-1] == 'temp' + def test_filename(check_tester): """ Tests the base class for path based checkers @@ -182,6 +196,7 @@ def test_filename(check_tester): value = b.cast() assert os.path.split(value)[-1] == 'log.txt' + def test_url(check_tester): """ Test our url checking. 
@@ -189,7 +204,8 @@ def test_url(check_tester): valids = ["https://google.com"] invalids = ["https://micah_subnaught_is_awesome.com"] check_tester.run_a_checker(valids, invalids, CheckURL, - item='favorite_web_site') + item='favorite_web_site') + def test_datetime_ordered_pairs(check_tester): """ @@ -212,21 +228,22 @@ def test_datetime_ordered_pairs(check_tester): # Check start values are before end values acfg = {'basic': {'end_date': end}} check_tester.run_a_checker([start], [error_start], CheckDatetimeOrderedPair, - item="start_date", - extra_config=acfg) + item="start_date", + extra_config=acfg) # Check start values are before end values acfg = {'basic': {'start_date': start}} check_tester.run_a_checker([end], [error_end], CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg) + item="end_date", + extra_config=acfg) # Check start end values are equal error acfg = {'basic': {'start_date': '2020-10-01'}} check_tester.run_a_checker(["2020-10-02"], ["2020-10-01"], - CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg) + CheckDatetimeOrderedPair, + item="end_date", + extra_config=acfg) + def test_bounds(check_tester): """ @@ -235,7 +252,7 @@ def test_bounds(check_tester): """ check_tester.run_a_checker([1.0, 0.0, '0.5'], [1.1, -1.0, '10'], CheckFloat, - item='fraction') + item='fraction') if __name__ == '__main__': diff --git a/tests/test_cli.py b/tests/test_cli.py index fbce254..fab0e7e 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,11 +1,11 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -import sys import re -from os.path import dirname, abspath, join +import sys +from os.path import abspath, dirname, join -from inicheck.cli import inicheck_main, inidiff_main, current_version +from inicheck.cli import current_version, inicheck_main, inidiff_main from .test_output import capture_print @@ -104,4 +104,4 @@ def test_version(self): exception_message = re.search( '(exception|error)', str(current_version()), re.IGNORECASE ) - 
assert exception_message is None + assert exception_message is None diff --git a/tests/test_config.py b/tests/test_config.py index b550bfc..8af237c 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -9,8 +9,8 @@ import datetime import os -import pytest +import pytest from inicheck.config import * from inicheck.tools import cast_all_variables, get_checkers diff --git a/tests/test_parsing.py b/tests/test_parsing.py index 9cd46d6..1017f1a 100644 --- a/tests/test_parsing.py +++ b/tests/test_parsing.py @@ -6,8 +6,9 @@ Tests for `inicheck.iniparse` module. ''' -import pytest from collections import OrderedDict + +import pytest from inicheck.iniparse import * @@ -17,10 +18,10 @@ def run_parsing_test(self, fn, info, expected, exception): ''' Manages testing exceptions and testing dict output ''' - if type(info) == OrderedDict: + if isinstance(info, OrderedDict): expected = OrderedDict(expected) - if exception == None: + if exception is None: received = fn(info) # Test the dictionaries recieved match the expected @@ -31,24 +32,24 @@ def run_parsing_test(self, fn, info, expected, exception): received = fn(info) @pytest.mark.parametrize('info, expected, exception', [ - # Test a simple parsing - (['[s]','i:v'], {'s':['i:v']}, None), - # Test a section only without items - (['[s]'], {'s':[]}, None), - # Test every section is made lower case - (['[CASE]'], {'case':[]}, None), - # Test space remove before and after section - (['[ spaces ]'], {'spaces':[]}, None), - # Test a single line section/value is parsed - (['[single_line]i:v'], {'single_line':['i:v']}, None), - # Test some comment removal - (['#comment','[s]','i:v'], {'s':['i:v']}, None), - ([';[commented]'], {}, None), - (['[s]','i:v ;comment'], {'s':['i:v']}, None), - # Test exception with repeat sections - (['[test]', '#', '[test]'], {}, Exception), - # Test non-comment chars before the first section - (['a#', '#', '[test]'], {}, Exception), + # Test a simple parsing + (['[s]', 'i:v'], {'s': ['i:v']}, None), 
+ # Test a section only without items + (['[s]'], {'s': []}, None), + # Test every section is made lower case + (['[CASE]'], {'case': []}, None), + # Test space remove before and after section + (['[ spaces ]'], {'spaces': []}, None), + # Test a single line section/value is parsed + (['[single_line]i:v'], {'single_line': ['i:v']}, None), + # Test some comment removal + (['#comment', '[s]', 'i:v'], {'s': ['i:v']}, None), + ([';[commented]'], {}, None), + (['[s]', 'i:v ;comment'], {'s': ['i:v']}, None), + # Test exception with repeat sections + (['[test]', '#', '[test]'], {}, Exception), + # Test non-comment chars before the first section + (['a#', '#', '[test]'], {}, Exception), ]) def test_parse_sections(self, info, expected, exception): @@ -64,16 +65,16 @@ def test_parse_sections(self, info, expected, exception): ''' self.run_parsing_test(parse_sections, info, expected, exception) - @pytest.mark.parametrize('info, expected, exception', [ - # Test simple config item parsing - (['a:10'], {'a':'10'}, None), - # Test a value thats a list parse with a line return which should simply merge them - (['a:10,','15,20'], {'a':'10, 15, 20'}, None), - # Test interpreting master file properties that span multiple lines - (['a: default=10','options=[10 15 20]'], {'a':'default=10 options=[10 15 20]'}, None), + # Test simple config item parsing + (['a:10'], {'a': '10'}, None), + # Test a value thats a list parse with a line return which should + # simply merge them + (['a:10,', '15,20'], {'a': '10, 15, 20'}, None), + # Test interpreting master file properties that span multiple lines + (['a: default=10', 'options=[10 15 20]'], { + 'a': 'default=10 options=[10 15 20]'}, None), ]) - def test_parse_items(self, info, expected, exception): ''' Tests our base function used to parse items after reading sections but @@ -88,12 +89,12 @@ def test_parse_items(self, info, expected, exception): expected = {'s': OrderedDict(expected)} self.run_parsing_test(parse_items, info, expected, 
exception) - - @pytest.mark.parametrize('info, expected, exception',[ - # Test parse values that might have excess chars - ('test1,\ttest2, test3', ['test1','test2', 'test3'], None ), - # Test parsing master properties parsing - ( '\tdefault=10,\toptions=[10 15 20]', ['default=10','options=[10 15 20]'], None), + @pytest.mark.parametrize('info, expected, exception', [ + # Test parse values that might have excess chars + ('test1,\ttest2, test3', ['test1', 'test2', 'test3'], None), + # Test parsing master properties parsing + ('\tdefault=10,\toptions=[10 15 20]', [ + 'default=10', 'options=[10 15 20]'], None), ]) def test_parse_values(self, info, expected, exception): ''' @@ -108,14 +109,15 @@ def test_parse_values(self, info, expected, exception): expected = OrderedDict({'s': OrderedDict({'i': expected})}) self.run_parsing_test(parse_values, info, expected, exception) - - @pytest.mark.parametrize('info, expected, exception',[ - # Test interpreting renaming a section - (['section/item -> new_section/item'], [['section','item','any', 'any'], ['new_section','item','any','any']], None), - # Test interpreting renaming an item - (['section/item -> section/new_item'], [['section','item','any', 'any'], ['section','new_item','any','any']], None), - # Tet syntax error - ([ 'section/item > REMOVED'], [], ValueError), + @pytest.mark.parametrize('info, expected, exception', [ + # Test interpreting renaming a section + (['section/item -> new_section/item'], [['section', 'item', + 'any', 'any'], ['new_section', 'item', 'any', 'any']], None), + # Test interpreting renaming an item + (['section/item -> section/new_item'], [['section', 'item', + 'any', 'any'], ['section', 'new_item', 'any', 'any']], None), + # Tet syntax error + (['section/item > REMOVED'], [], ValueError), ]) def test_parse_changes(self, info, expected, exception): diff --git a/tests/test_tools.py b/tests/test_tools.py index 1363049..07b0175 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -8,8 +8,8 @@ 
""" from collections import OrderedDict -import pytest +import pytest from inicheck.tools import * diff --git a/tests/test_utilities.py b/tests/test_utilities.py index ebe925f..03ef6b5 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -6,16 +6,18 @@ Tests for `inicheck.utilities` module. """ +from os.path import dirname, join + import pytest -from os.path import join, dirname from inicheck.tools import get_user_config from inicheck.utilities import * + @pytest.mark.parametrize("value, expected", [ -('test#comment', 'test'), -("test1;comment", 'test1'), -(";full in line comment", ''), -("testboth; test a comment with both types of # comments ", 'testboth'), + ('test#comment', 'test'), + ("test1;comment", 'test1'), + (";full in line comment", ''), + ("testboth; test a comment with both types of # comments ", 'testboth'), ]) def test_remove_comments(value, expected): """ @@ -25,16 +27,16 @@ def test_remove_comments(value, expected): assert out == expected -@pytest.mark.parametrize("value, unlist, expected",[ -# Test we make things into lists -('test', False, True), -(['test'], False, True), -# Test we can take things out of a list -('test', True, False), -(['test'], True, False), -(['test'], True, False), -# Test we don't take lists of len > 1 out of a list -(['test', 'test2'], True, True) +@pytest.mark.parametrize("value, unlist, expected", [ + # Test we make things into lists + ('test', False, True), + (['test'], False, True), + # Test we can take things out of a list + ('test', True, False), + (['test'], True, False), + (['test'], True, False), + # Test we don't take lists of len > 1 out of a list + (['test', 'test2'], True, True) ]) def test_mk_lst(value, unlist, expected): """ @@ -61,7 +63,7 @@ def test_find_options_in_recipes(): such that they don't exist at the same time. 
Used in the inimake cli """ base = dirname(__file__) - ucfg = get_user_config(join(base,"test_configs","full_config.ini"), + ucfg = get_user_config(join(base, "test_configs", "full_config.ini"), modules='inicheck') mcfg = ucfg.mcfg choices = find_options_in_recipes(mcfg.recipes, mcfg.cfg.keys(), @@ -77,8 +79,8 @@ def test_get_relative_to_cfg(): Tests that all paths in a config can be made relative to the config """ base = dirname(__file__) - ucfg = get_user_config(join(base,"test_configs","full_config.ini"), - modules='inicheck') + ucfg = get_user_config(join(base, "test_configs", "full_config.ini"), + modules='inicheck') mcfg = ucfg.mcfg choices = find_options_in_recipes(mcfg.recipes, mcfg.cfg.keys(), "remove_section") @@ -86,15 +88,15 @@ def test_get_relative_to_cfg(): assert p == '../test_utilities.py' -@pytest.mark.parametrize('value, kw, count, expected',[ -# Finds test in string -("tests_my_kw", ['tests'], 1, True), -# Finds all three in the string -("tests_my_kw", ['tests', 'my', 'kw'], 3, True), -# Finds all 2 in the string -("te_my_kw", ['tests', 'my', 'kw'], 2, True), -# No match -("te_ym_k", ['tests', 'my', 'kw'], 1, False), +@pytest.mark.parametrize('value, kw, count, expected', [ + # Finds test in string + ("tests_my_kw", ['tests'], 1, True), + # Finds all three in the string + ("tests_my_kw", ['tests', 'my', 'kw'], 3, True), + # Finds all 2 in the string + ("te_my_kw", ['tests', 'my', 'kw'], 2, True), + # No match + ("te_ym_k", ['tests', 'my', 'kw'], 1, False), ]) def test_is_kw_matched(value, kw, count, expected): """ @@ -104,9 +106,10 @@ def test_is_kw_matched(value, kw, count, expected): # Finds test in string assert is_kw_matched(value, kw, kw_count=count) == expected -@pytest.mark.parametrize('kw, count, expected',[ -(['test'], 1, 'test_my_str'), -(['test', 'float'], 2, 'test_my_float')]) + +@pytest.mark.parametrize('kw, count, expected', [ + (['test'], 1, 'test_my_str'), + (['test', 'float'], 2, 'test_my_float')]) def test_get_kw_match(kw, 
count, expected): """ Tests utilities.get_kw_match which looks through a list of strings with @@ -119,14 +122,15 @@ def test_get_kw_match(kw, count, expected): t = get_kw_match(test_lst, kw, kw_count=count) assert t == expected + @pytest.mark.parametrize('value, dtype, allow_none, expected', [ -# Test this is a convertible string -('10.0', float, False, [True, None]), -# Test the allow None kwarg -(None, float, False, [False, "Value cannot be None"]), -(None, float, True, [True, None]), -# Test we report an error -('abc', float, False, [False, 'Expecting float received str']), + # Test this is a convertible string + ('10.0', float, False, [True, None]), + # Test the allow None kwarg + (None, float, False, [False, "Value cannot be None"]), + (None, float, True, [True, None]), + # Test we report an error + ('abc', float, False, [False, 'Expecting float received str']), ]) def test_is_valid(value, dtype, allow_none, expected): """ @@ -144,7 +148,7 @@ def test_get_inicheck_cmd(): """ base = dirname(__file__) - fname = join(base,"test_configs","full_config.ini") + fname = join(base, "test_configs", "full_config.ini") cmd = get_inicheck_cmd( fname, modules='inicheck', From e3b1aed521e6f8d8f078603fd023de145350b4db Mon Sep 17 00:00:00 2001 From: micah johnson Date: Thu, 28 Jan 2021 21:08:48 -0700 Subject: [PATCH 06/21] Updated to fixtures, autopep8, and separation of exception cases to individual test --- tests/test_checkers.py | 17 ++- tests/test_cli.py | 142 +++++++++-------- tests/test_config.py | 339 ++++++++++++++++++++++------------------- tests/test_output.py | 141 +++++++++-------- tests/test_parsing.py | 231 ++++++++++++++++------------ tests/test_tools.py | 145 +++++++++--------- 6 files changed, 556 insertions(+), 459 deletions(-) diff --git a/tests/test_checkers.py b/tests/test_checkers.py index 08f015d..3131f6d 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -13,7 +13,14 @@ from inicheck.config import MasterConfig, UserConfig -class 
TestCheckers(): +class CheckerTester(): + def __init__(self): + tests_p = os.path.join(os.path.dirname(inicheck.__file__), '../tests') + self.mcfg = MasterConfig(path=os.path.join(tests_p, + 'test_configs/master.ini')) + + self.ucfg = UserConfig(os.path.join(tests_p, "test_configs/base_cfg.ini"), + mcfg=self.mcfg) def run_a_checker(self, valids, invalids, checker, section='basic', item='item', @@ -64,13 +71,7 @@ def check_tester(): """ Create some key structures for testing """ - cls = TestCheckers() - tests_p = os.path.join(os.path.dirname(inicheck.__file__), '../tests') - cls.mcfg = MasterConfig(path=os.path.join(tests_p, - 'test_configs/master.ini')) - - cls.ucfg = UserConfig(os.path.join(tests_p, "test_configs/base_cfg.ini"), - mcfg=cls.mcfg) + cls = CheckerTester() return cls diff --git a/tests/test_cli.py b/tests/test_cli.py index fab0e7e..7451312 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -8,100 +8,112 @@ from inicheck.cli import current_version, inicheck_main, inidiff_main from .test_output import capture_print +import pytest -class TestCLI(): - - @classmethod - def setup_class(cls): - cls.test_base = abspath(join(dirname(__file__), 'test_configs')) - cls.master = [ - join(cls.test_base, 'recipes.ini'), - join(cls.test_base, 'CoreConfig.ini') +class CLITester(): + def __init__(self): + self.test_base = abspath(join(dirname(__file__), 'test_configs')) + self.master = [ + join(self.test_base, 'recipes.ini'), + join(self.test_base, 'CoreConfig.ini') ] - cls.full_config = join(cls.test_base, 'full_config.ini') + self.full_config = join(self.test_base, 'full_config.ini') - @classmethod - def capture_with_params(cls, **kwargs): + def capture_with_params(self, **kwargs): return str( capture_print( inicheck_main, - config_file=cls.full_config, - master=cls.master, + config_file=self.full_config, + master=self.master, **kwargs ) ) - def test_basic_inicheck_cli(self): - """ Test simplest usage of CLI """ - s = self.capture_with_params() 
+@pytest.fixture +def cli_tester(): + cls = CLITester() - assert s.count("File does not exist") >= 9 - assert s.count("Not a registered option") >= 20 + return cls - def test_inicheck_recipe_use(self): - """ Test recipe output """ - s = self.capture_with_params(show_recipes=True) +def test_basic_inicheck_cli(cli_tester): + """ Test simplest usage of CLI """ - assert s.count("_recipe") == 20 + s = cli_tester.capture_with_params() - def test_inicheck_non_defaults_use(self): - """ Test non-default output""" + assert s.count("File does not exist") >= 9 + assert s.count("Not a registered option") >= 20 - s = self.capture_with_params(show_non_defaults=True) - assert s.count("wind") >= 7 - assert s.count("albedo") >= 3 +def test_inicheck_recipe_use(cli_tester): + """ Test recipe output """ - def test_inicheck_details_use(self): - """ Test details output """ + s = cli_tester.capture_with_params(show_recipes=True) - s = self.capture_with_params(details=['topo']) + assert s.count("_recipe") == 20 - assert s.count("topo") >= 4 - s = self.capture_with_params(details=['topo', 'basin_lat']) +def test_inicheck_non_defaults_use(cli_tester): + """ Test non-default output""" - assert s.count("topo") >= 1 + s = cli_tester.capture_with_params(show_non_defaults=True) - def test_inicheck_changelog_use(self): - """ Test changelog detection output """ + assert s.count("wind") >= 7 + assert s.count("albedo") >= 3 - old_cfg = join(self.test_base, 'old_smrf_config.ini') - s = str(capture_print( - inicheck_main, - config_file=old_cfg, - master=self.master, - changelog_file=join(self.test_base, 'changelog.ini') - )) - assert s.count("topo") == 7 - assert s.count("wind") == 12 - assert s.count("stations") == 5 - assert s.count("solar") == 9 - assert s.count("precip") == 18 - assert s.count("air_temp") == 9 - assert s.count("albedo") == 30 +def test_inicheck_details_use(cli_tester): + """ Test details output """ - def test_inidiff(self): - """ - Tests if the inidiff script is producing the same 
information - """ + s = cli_tester.capture_with_params(details=['topo']) - configs = [ - join(self.test_base, 'full_config.ini'), - join(self.test_base, 'base_cfg.ini') - ] + assert s.count("topo") >= 4 - s = capture_print(inidiff_main, configs, master=self.master) + s = cli_tester.capture_with_params(details=['topo', 'basin_lat']) - mismatches = s.split("config mismatches:")[-1].strip() - assert '117' in mismatches + assert s.count("topo") >= 1 - def test_version(self): - exception_message = re.search( - '(exception|error)', str(current_version()), re.IGNORECASE - ) - assert exception_message is None + +def test_inicheck_changelog_use(cli_tester): + """ Test changelog detection output """ + + old_cfg = join(cli_tester.test_base, 'old_smrf_config.ini') + + s = str(capture_print( + inicheck_main, + config_file=old_cfg, + master=cli_tester.master, + changelog_file=join(cli_tester.test_base, 'changelog.ini') + )) + assert s.count("topo") == 7 + assert s.count("wind") == 12 + assert s.count("stations") == 5 + assert s.count("solar") == 9 + assert s.count("precip") == 18 + assert s.count("air_temp") == 9 + assert s.count("albedo") == 30 + + +def test_inidiff(cli_tester): + """ + Tests if the inidiff script is producing the same information + """ + + configs = [ + join(cli_tester.test_base, 'full_config.ini'), + join(cli_tester.test_base, 'base_cfg.ini') + ] + + s = capture_print(inidiff_main, configs, master=cli_tester.master) + + mismatches = s.split("config mismatches:")[-1].strip() + assert '117' in mismatches + + +def test_version(cli_tester): + exception_message = re.search( + '(exception|error)', str(current_version()), re.IGNORECASE + ) + assert exception_message is None diff --git a/tests/test_config.py b/tests/test_config.py index 8af237c..7a74028 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -67,10 +67,8 @@ def compare_config(generated_config, truth_config, return True -class TestUserConfig(): - - @classmethod - def setup_class(self): +class 
UserConfigTester(): + def __init__(self): """ """ fname = os.path.abspath( @@ -79,45 +77,52 @@ def setup_class(self): mcfg = MasterConfig(modules='inicheck') self.ucfg = UserConfig(fname, mcfg=mcfg) - def test_ucfg_init(self): - """ - Simply opens and looks at the base config BEFORE all the recipes and - stuff has been applied - """ - # Assert important attributes - for a in ['mcfg', 'cfg', 'raw_cfg', 'recipes']: - assert(hasattr(self.ucfg, a)) +@pytest.fixture +def ucfg_tester(): + return UserConfigTester() - def test_apply_recipes(self): - """ - Tests that the correct recipes were identified to be used for - interpretation - """ - truth = {'topo': - {'filename': ['None'], - 'type': 'netcdf'}, - 'cloud_factor': - {'distribution': 'idw'} - } +def test_ucfg_init(ucfg_tester): + """ + Simply opens and looks at the base config BEFORE all the recipes and + stuff has been applied + """ - self.ucfg.apply_recipes() - valid_recipes = ['topo_basic_recipe', 'time_recipe', 'air_temp_recipe', - 'vp_recipe', 'wind_recipe', 'precip_recipe', - 'non_winstral_recipe', 'cloud_factor_recipe', - 'albedo_recipe', 'date_decay_method_recipe', - 'solar_recipe', 'thermal_recipe', 'soil_recipe', - 'output_recipe', 'system_recipe', 'csv_recipe', - 'remove_wind_ninja_recipe', 'non_grid_local_recipe', - 'dk_recipe', 'idw_recipe'] - for v in valid_recipes: - assert v in [r.name for r in self.ucfg.recipes] + # Assert important attributes + for a in ['mcfg', 'cfg', 'raw_cfg', 'recipes']: + assert(hasattr(ucfg_tester.ucfg, a)) -class TestRecipes(): +def test_apply_recipes(ucfg_tester): + """ + Tests that the correct recipes were identified to be used for + interpretation + """ - def setup_method(self): + truth = {'topo': + {'filename': ['None'], + 'type': 'netcdf'}, + 'cloud_factor': + {'distribution': 'idw'} + } + + ucfg_tester.ucfg.apply_recipes() + valid_recipes = ['topo_basic_recipe', 'time_recipe', 'air_temp_recipe', + 'vp_recipe', 'wind_recipe', 'precip_recipe', + 'non_winstral_recipe', 
'cloud_factor_recipe', + 'albedo_recipe', 'date_decay_method_recipe', + 'solar_recipe', 'thermal_recipe', 'soil_recipe', + 'output_recipe', 'system_recipe', 'csv_recipe', + 'remove_wind_ninja_recipe', 'non_grid_local_recipe', + 'dk_recipe', 'idw_recipe'] + for v in valid_recipes: + assert v in [r.name for r in ucfg_tester.ucfg.recipes] + + +class RecipeTester(): + + def __init__(self): self.fname = os.path.abspath(os.path.dirname(__file__) + '/test_configs/full_config.ini') @@ -126,7 +131,6 @@ def setup_method(self): def modify_cfg(self, mod_cfg): """ - """ for s, v in mod_cfg.items(): @@ -159,82 +163,92 @@ def check_defaults(self, section=None, ignore=[]): for i in checkable: assert self.ucfg.cfg[section][i] == self.mcfg.cfg[section][i].default - def test_apply_defaults(self): - """ - Tests the functionality of a recipes ability to add in defaults for - section when a section is not there. - """ - del self.ucfg.raw_cfg['csv'] +@pytest.fixture +def recipe_tester(): + return RecipeTester() - test = {'csv': {'stations': None}} - self.modify_cfg(test) - self.check_items(section='csv') - self.check_defaults(section='csv') +def test_apply_defaults(recipe_tester): + """ + Tests the functionality of a recipes ability to add in defaults for + section when a section is not there. + """ - def test_remove_section(self): - """ - Tests the functionality of a recipes ability to a section in defaults for - section when a section is not there. 
- """ + del recipe_tester.ucfg.raw_cfg['csv'] - del self.ucfg.raw_cfg['csv'] + test = {'csv': {'stations': None}} + recipe_tester.modify_cfg(test) - # Order matters, since we have conflicting recipes the first one will be - # applied, in this case CSV will beat out gridded - test = {'csv': {'stations': None}, - 'gridded': {}} + recipe_tester.check_items(section='csv') + recipe_tester.check_defaults(section='csv') - self.modify_cfg(test) - self.check_items(section='csv') - assert 'gridded' not in self.ucfg.cfg.keys() +def test_remove_section(recipe_tester): + """ + Tests the functionality of a recipes ability to a section in defaults for + section when a section is not there. + """ - def test_remove_item_for_a_section(self): - """ - Sometimes a recipe will remove an item when a certain section is present - This tests that scenario occurs, uses thermal_distribution_recipe - """ + del recipe_tester.ucfg.raw_cfg['csv'] - test = {'gridded': {'data_type': 'wrf'}} - # The order of recipes matters. Del the csv section to avoid recipes on - # it - del self.ucfg.raw_cfg['csv'] - self.modify_cfg(test) + # Order matters, since we have conflicting recipes the first one will be + # applied, in this case CSV will beat out gridded + test = {'csv': {'stations': None}, + 'gridded': {}} - assert 'distribution' not in self.ucfg.cfg['thermal'].keys() + recipe_tester.modify_cfg(test) - def test_add_items_for_has_value(self): - """ - Recipes have the available keyword has_value to trigger on event where - a section item has a value. This tests that we can trigger on it and - make edits. Test uses the idw_recipe in which if any section is found - with the item distribution set to idw, then we add idw_power and remove - dk_ncores. 
- """ + recipe_tester.check_items(section='csv') + assert 'gridded' not in recipe_tester.ucfg.cfg.keys() + + +def test_remove_item_for_a_section(recipe_tester): + """ + Sometimes a recipe will remove an item when a certain section is present + This tests that scenario occurs, uses thermal_distribution_recipe + """ - test = {'precip': {'distribution': 'idw', 'dk_ncores': '2'}} - self.modify_cfg(test) + test = {'gridded': {'data_type': 'wrf'}} + # The order of recipes matters. Del the csv section to avoid recipes on + # it + del recipe_tester.ucfg.raw_cfg['csv'] + recipe_tester.modify_cfg(test) - assert 'dk_ncores' not in self.ucfg.cfg['precip'].keys() - assert 'idw_power' in self.ucfg.cfg['precip'].keys() + assert 'distribution' not in recipe_tester.ucfg.cfg['thermal'].keys() - def test_apply_defaults_for_has_value(self): - """ - This recipe applies defaults to items when an item has a certain value - This test uses the krig_recipe in which any item distribution is set to - kriging applies several defautls. - """ - test = {'precip': {'distribution': 'kriging', 'dk_ncores': '2'}} - self.modify_cfg(test) - assert 'krig_variogram_model' in self.ucfg.cfg['precip'].keys() - assert 'dk_ncores' not in self.ucfg.cfg['precip'].keys() +def test_add_items_for_has_value(recipe_tester): + """ + Recipes have the available keyword has_value to trigger on event where + a section item has a value. This tests that we can trigger on it and + make edits. Test uses the idw_recipe in which if any section is found + with the item distribution set to idw, then we add idw_power and remove + dk_ncores. 
+ """ + + test = {'precip': {'distribution': 'idw', 'dk_ncores': '2'}} + recipe_tester.modify_cfg(test) + + assert 'dk_ncores' not in recipe_tester.ucfg.cfg['precip'].keys() + assert 'idw_power' in recipe_tester.ucfg.cfg['precip'].keys() -class TestMasterConfig(): - def setup_method(self): +def test_apply_defaults_for_has_value(recipe_tester): + """ + This recipe applies defaults to items when an item has a certain value + This test uses the krig_recipe in which any item distribution is set to + kriging applies several defautls. + """ + + test = {'precip': {'distribution': 'kriging', 'dk_ncores': '2'}} + recipe_tester.modify_cfg(test) + assert 'krig_variogram_model' in recipe_tester.ucfg.cfg['precip'].keys() + assert 'dk_ncores' not in recipe_tester.ucfg.cfg['precip'].keys() + + +class MasterConfigTester(): + def __init__(self): """ Stage our truthing data here """ @@ -246,69 +260,82 @@ def setup_method(self): 'detrend': 'true', 'dk_ncores': '1'}} - def test_grabbing_mcfg(self): - """ - Builds a master config from the module and paths, check it. - """ - # Build a master config file using multiple files - try: - mcfg = MasterConfig(modules='inicheck') - assert True - except BaseException: - assert False - - base = os.path.dirname(__file__) - master = os.path.join(base, "./test_configs/CoreConfig.ini") - recipes = os.path.join(base, "./test_configs/recipes.ini") - - try: - mcfg = MasterConfig(path=[master, recipes]) - assert True - except BaseException: - assert False - - def test_add_files(self): - """ - Builds a master config from the files, check it. 
- """ - # Build a master config file using multiple files - base = os.path.dirname(__file__) - master = os.path.join(base, "test_configs/CoreConfig.ini") - recipes = os.path.join(base, "test_configs/recipes.ini") - mcfg = MasterConfig(path=master) - mcfg.cfg = mcfg.add_files([master, recipes]) +@pytest.fixture +def mcfg_tester(): + return MasterConfigTester() + - valid_sections = ['topo', 'csv', 'air_temp'] - for v in valid_sections: - assert v in mcfg.cfg.keys() +def test_grabbing_mcfg(mcfg_tester): + """ + Builds a master config from the module and paths, check it. + """ + # Build a master config file using multiple files + try: + mcfg = MasterConfig(modules='inicheck') + assert True + except BaseException: + assert False - assert 'topo_basic_recipe' in [r.name for r in mcfg.recipes] + base = os.path.dirname(__file__) + master = os.path.join(base, "./test_configs/CoreConfig.ini") + recipes = os.path.join(base, "./test_configs/recipes.ini") - def test_check_types(self): - """ - Checks to make sure we throw the correct error when an unknown data - type is requested - """ + try: + mcfg = MasterConfig(path=[master, recipes]) + assert True + except BaseException: + assert False - # Call out a BS entry type to raise the error - checkers = get_checkers() - invalids = ['str', 'filepath', 'criticalfile'] - valids = ['bool', 'criticaldirectory', 'criticalfilename', - 'discretionarycriticalfilename', 'datetime', - 'datetimeorderedpair', 'directory', 'filename', 'float', - 'int', 'string', 'url'] - - for z, values in enumerate([invalids, valids]): - for kw in values: - line = ["type = {}".format(kw), "description = test"] - cfg = {"section": {"test": ConfigEntry(name='test', - parseable_line=line)}} - # invalids - if z == 0: - with pytest.raises(ValueError): - check_types(cfg, checkers) - - # valids - else: - assert check_types(cfg, checkers) + +def test_add_files(mcfg_tester): + """ + Builds a master config from the files, check it. 
+ """ + # Build a master config file using multiple files + base = os.path.dirname(__file__) + master = os.path.join(base, "test_configs/CoreConfig.ini") + recipes = os.path.join(base, "test_configs/recipes.ini") + + mcfg = MasterConfig(path=master) + mcfg.cfg = mcfg.add_files([master, recipes]) + + valid_sections = ['topo', 'csv', 'air_temp'] + for v in valid_sections: + assert v in mcfg.cfg.keys() + + assert 'topo_basic_recipe' in [r.name for r in mcfg.recipes] + + +def test_check_types(mcfg_tester): + """ + Checks to make sure we throw the correct error when an unknown data + type is requested + """ + + # Call out a BS entry type to raise the error + checkers = get_checkers() + valids = ['bool', 'criticaldirectory', 'criticalfilename', + 'discretionarycriticalfilename', 'datetime', + 'datetimeorderedpair', 'directory', 'filename', 'float', + 'int', 'string', 'url'] + + for kw in valids: + line = ["type = {}".format(kw), "description = test"] + cfg = {"section": {"test": ConfigEntry(name='test', + parseable_line=line)}} + assert check_types(cfg, checkers) + + +def test_check_types_exception(mcfg_tester): + + # Call out a BS entry type to raise the error + checkers = get_checkers() + invalids = ['str', 'filepath', 'criticalfile'] + + for kw in invalids: + line = ["type = {}".format(kw), "description = test"] + cfg = {"section": {"test": ConfigEntry(name='test', + parseable_line=line)}} + with pytest.raises(ValueError): + check_types(cfg, checkers) diff --git a/tests/test_output.py b/tests/test_output.py index eba200e..20800c1 100644 --- a/tests/test_output.py +++ b/tests/test_output.py @@ -9,12 +9,14 @@ import io import os +from os.path import isfile import shutil from collections import OrderedDict from contextlib import redirect_stdout from inicheck.output import * from inicheck.tools import get_user_config +import pytest def capture_print(function_call, *args, **kwargs): @@ -34,77 +36,84 @@ def capture_print(function_call, *args, **kwargs): with 
redirect_stdout(f): function_call(*args, **kwargs) out = f.getvalue() + + out_f = 'out_config.ini' + if isfile(out_f): + os.remove(out_f) return out -class TestOutput(): +class OutputTester(): - @classmethod - def setup_class(self): + def __init__(self): base = os.path.dirname(__file__) self.ucfg = get_user_config(os.path.join(base, "test_configs/full_config.ini"), modules="inicheck") - @classmethod - def teardown_Class(self): - """ - Delete any files - """ - os.remove('out_config.ini') - - def test_generate_config(self): - """ - Tests if we generate a config to a file - """ - generate_config(self.ucfg, 'out_config.ini', cli=False) - - with open('out_config.ini') as fp: - lines = fp.readlines() - fp.close() - - # Assert a header is written - assert 'Configuration' in lines[1] - - key_count = 0 - - # Assert all the sections are written - for k in self.ucfg.cfg.keys(): - for l in lines: - if k in l: - key_count += 1 - break - - assert key_count == len(self.ucfg.cfg.keys()) - - def test_print_recipe_summary(self): - """ - Checks that the output produces 366 lines of recipe info - """ - lst_recipes = self.ucfg.mcfg.recipes - out = capture_print(print_recipe_summary, lst_recipes) - assert len(out.split('\n')) == 366 - assert out.count('recipe') == 34 - - def test_print_details(self): - """ - Tests the function for printting help on the master config - """ - # Test for a whole section - details = ['air_temp'] - out = capture_print(print_details, details, self.ucfg.mcfg.cfg) - - assert out.count('air_temp') == len(self.ucfg.mcfg.cfg[details[0]]) - - # test for a section and item - details = ['precip', 'distribution'] - out = capture_print(print_details, details, self.ucfg.mcfg.cfg) - assert out.count('precip ') == 1 - - def test_non_default_print(self): - """ - Tests if printing the non-defaults is working - """ - out = capture_print(print_non_defaults, self.ucfg) - - # Check that we have 27 lines of info for non-defaults - assert len(out.split('\n')) == 27 + 
+@pytest.fixture +def output_tester(): + return OutputTester() + + +def test_generate_config(output_tester): + """ + Tests if we generate a config to a file + """ + generate_config(output_tester.ucfg, 'out_config.ini', cli=False) + + with open('out_config.ini') as fp: + lines = fp.readlines() + fp.close() + + # Assert a header is written + assert 'Configuration' in lines[1] + + key_count = 0 + + # Assert all the sections are written + for k in output_tester.ucfg.cfg.keys(): + for l in lines: + if k in l: + key_count += 1 + break + + assert key_count == len(output_tester.ucfg.cfg.keys()) + + +def test_print_recipe_summary(output_tester): + """ + Checks that the output produces 366 lines of recipe info + """ + lst_recipes = output_tester.ucfg.mcfg.recipes + out = capture_print(print_recipe_summary, lst_recipes) + + assert len(out.split('\n')) == 366 + assert out.count('recipe') == 34 + + +def test_print_details(output_tester): + """ + Tests the function for printting help on the master config + """ + # Test for a whole section + details = ['air_temp'] + out = capture_print(print_details, details, output_tester.ucfg.mcfg.cfg) + + assert out.count('air_temp') == len( + output_tester.ucfg.mcfg.cfg[details[0]]) + + # test for a section and item + details = ['precip', 'distribution'] + out = capture_print(print_details, details, output_tester.ucfg.mcfg.cfg) + assert out.count('precip ') == 1 + + +def test_non_default_print(output_tester): + """ + Tests if printing the non-defaults is working + """ + out = capture_print(print_non_defaults, output_tester.ucfg) + + # Check that we have 27 lines of info for non-defaults + assert len(out.split('\n')) == 27 diff --git a/tests/test_parsing.py b/tests/test_parsing.py index 1017f1a..48c8ee6 100644 --- a/tests/test_parsing.py +++ b/tests/test_parsing.py @@ -12,7 +12,7 @@ from inicheck.iniparse import * -class TestIniparse(): +class IniparseTester(): def run_parsing_test(self, fn, info, expected, exception): ''' @@ -31,99 +31,138 @@ 
def run_parsing_test(self, fn, info, expected, exception): with pytest.raises(exception): received = fn(info) - @pytest.mark.parametrize('info, expected, exception', [ - # Test a simple parsing - (['[s]', 'i:v'], {'s': ['i:v']}, None), - # Test a section only without items - (['[s]'], {'s': []}, None), - # Test every section is made lower case - (['[CASE]'], {'case': []}, None), - # Test space remove before and after section - (['[ spaces ]'], {'spaces': []}, None), - # Test a single line section/value is parsed - (['[single_line]i:v'], {'single_line': ['i:v']}, None), - # Test some comment removal - (['#comment', '[s]', 'i:v'], {'s': ['i:v']}, None), - ([';[commented]'], {}, None), - (['[s]', 'i:v ;comment'], {'s': ['i:v']}, None), - # Test exception with repeat sections - (['[test]', '#', '[test]'], {}, Exception), - # Test non-comment chars before the first section - (['a#', '#', '[test]'], {}, Exception), - - ]) - def test_parse_sections(self, info, expected, exception): - ''' - Tests our base function used to parse sections before we do any - processing - - Ensures: - - * we can handle comments around sections. - * caps issues - - ''' - self.run_parsing_test(parse_sections, info, expected, exception) - - @pytest.mark.parametrize('info, expected, exception', [ - # Test simple config item parsing - (['a:10'], {'a': '10'}, None), - # Test a value thats a list parse with a line return which should - # simply merge them - (['a:10,', '15,20'], {'a': '10, 15, 20'}, None), - # Test interpreting master file properties that span multiple lines - (['a: default=10', 'options=[10 15 20]'], { - 'a': 'default=10 options=[10 15 20]'}, None), - ]) - def test_parse_items(self, info, expected, exception): - ''' - Tests our base function used to parse items after reading sections but - before processing values. 
- * tests for wrapped lists - * tests to remove bracketed lists with comments - * tests for properties being added in master files - ''' - # Assign a section to the info which are always OrderedDict - info = OrderedDict({'s': info}) - expected = {'s': OrderedDict(expected)} - self.run_parsing_test(parse_items, info, expected, exception) - - @pytest.mark.parametrize('info, expected, exception', [ - # Test parse values that might have excess chars - ('test1,\ttest2, test3', ['test1', 'test2', 'test3'], None), - # Test parsing master properties parsing - ('\tdefault=10,\toptions=[10 15 20]', [ - 'default=10', 'options=[10 15 20]'], None), - ]) - def test_parse_values(self, info, expected, exception): - ''' - test parse values - - Ensures: - * Cleans up values with list entries - * Cleans up properties we recieve including non-comma sep lists - ''' - # Assign info to a place in a dict of dict which are all ordered - info = OrderedDict({'s': OrderedDict({'i': info})}) - expected = OrderedDict({'s': OrderedDict({'i': expected})}) - self.run_parsing_test(parse_values, info, expected, exception) - - @pytest.mark.parametrize('info, expected, exception', [ - # Test interpreting renaming a section - (['section/item -> new_section/item'], [['section', 'item', - 'any', 'any'], ['new_section', 'item', 'any', 'any']], None), - # Test interpreting renaming an item - (['section/item -> section/new_item'], [['section', 'item', - 'any', 'any'], ['section', 'new_item', 'any', 'any']], None), - # Tet syntax error - (['section/item > REMOVED'], [], ValueError), - - ]) - def test_parse_changes(self, info, expected, exception): - ''' - Tests tha change lof parsing. 
Ensures we raise a value error for invalid - syntax and that valid syntax is parsed correctly - ''' - # d = ['section/item -> new_section/item', 'section/item -> REMOVED'] - self.run_parsing_test(parse_changes, info, [expected], exception) +@pytest.fixture +def iniparse_tester(): + return IniparseTester() + + +@pytest.mark.parametrize('info, expected', [ + # Test a simple parsing + (['[s]', 'i:v'], {'s': ['i:v']}), + # Test a section only without items + (['[s]'], {'s': []}), + # Test every section is made lower case + (['[CASE]'], {'case': []}), + # Test space remove before and after section + (['[ spaces ]'], {'spaces': []}), + # Test a single line section/value is parsed + (['[single_line]i:v'], {'single_line': ['i:v']}), + # Test some comment removal + (['#comment', '[s]', 'i:v'], {'s': ['i:v']}), + ([';[commented]'], {}), + (['[s]', 'i:v ;comment'], {'s': ['i:v']}), ]) +def test_parse_sections(iniparse_tester, info, expected): + ''' + Tests our base function used to parse sections before we do any + processing + + Ensures: + + * we can handle comments around sections. + * caps issues + + ''' + iniparse_tester.run_parsing_test(parse_sections, info, expected, None) + + +@pytest.mark.parametrize("info, expected", [ + # Test exception with repeat sections + (['[test]', '#', '[test]'], {}), + # Test non-comment chars before the first section + (['a#', '#', '[test]'], {}), +]) +def test_parse_sections_exception(iniparse_tester, info, expected): + ''' + Tests our base function used to parse sections before we do any + processing + + Ensures: + + * we can handle comments around sections. 
+ * caps issues + + ''' + iniparse_tester.run_parsing_test(parse_sections, info, expected, Exception) + + +@pytest.mark.parametrize('info, expected', [ + # Test simple config item parsing + (['a:10'], {'a': '10'}), + # Test a value thats a list parse with a line return which should + # simply merge them + (['a:10,', '15,20'], {'a': '10, 15, 20'}), + # Test interpreting master file properties that span multiple lines + (['a: default=10', 'options=[10 15 20]'], { + 'a': 'default=10 options=[10 15 20]'}), +]) +def test_parse_items(iniparse_tester, info, expected): + ''' + Tests our base function used to parse items after reading sections but + before processing values. + + * tests for wrapped lists + * tests to remove bracketed lists with comments + * tests for properties being added in master files + ''' + # Assign a section to the info which are always OrderedDict + info = OrderedDict({'s': info}) + expected = {'s': OrderedDict(expected)} + iniparse_tester.run_parsing_test(parse_items, info, expected, None) + + +@pytest.mark.parametrize('info, expected', [ + # Test parse values that might have excess chars + ('test1,\ttest2, test3', ['test1', 'test2', 'test3']), + # Test parsing master properties parsing + ('\tdefault=10,\toptions=[10 15 20]', [ + 'default=10', 'options=[10 15 20]']), +]) +def test_parse_values(iniparse_tester, info, expected): + ''' + test parse values + + Ensures: + * Cleans up values with list entries + * Cleans up properties we recieve including non-comma sep lists + ''' + # Assign info to a place in a dict of dict which are all ordered + info = OrderedDict({'s': OrderedDict({'i': info})}) + expected = OrderedDict({'s': OrderedDict({'i': expected})}) + iniparse_tester.run_parsing_test(parse_values, info, expected, None) + + +@pytest.mark.parametrize('info, expected', [ + # Test interpreting renaming a section + (['section/item -> new_section/item'], [['section', 'item', + 'any', 'any'], ['new_section', 'item', 'any', 'any']]), + # Test 
 interpreting renaming an item
+    (['section/item -> section/new_item'], [['section', 'item',
+        'any', 'any'], ['section', 'new_item', 'any', 'any']]),
+    # test syntax error
+    # Test interpreting renaming a section
+    (['section/item -> new_section/item'], [['section', 'item',
+        'any', 'any'], ['new_section', 'item', 'any', 'any']]),
+    # Test interpreting renaming an item
+    (['section/item -> section/new_item'], [['section', 'item',
+        'any', 'any'], ['section', 'new_item', 'any', 'any']]),
+])
+def test_parse_changes(iniparse_tester, info, expected):
+    '''
+    Tests the change log parsing. Ensures that valid rename syntax is
+    parsed into the expected old/new specification pairs.
+    '''
+    iniparse_tester.run_parsing_test(parse_changes, info, [expected], None)
+
+
+@pytest.mark.parametrize('info, expected', [
+    # Test syntax error
+    (['section/item > REMOVED'], []),
+])
+def test_parse_changes_exception(iniparse_tester, info, expected):
+    '''
+    Tests tha change lof parsing. 
Ensures we raise a value error for invalid + syntax and that valid syntax is parsed correctly + ''' + iniparse_tester.run_parsing_test( + parse_changes, info, [expected], ValueError) diff --git a/tests/test_tools.py b/tests/test_tools.py index 07b0175..49493b9 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -13,96 +13,105 @@ from inicheck.tools import * -class TestTools(): - @classmethod - def setup_class(self): +class ToolTester(): + def __init__(self): base = os.path.dirname(__file__) self.ucfg = get_user_config(os.path.join(base, "test_configs/full_config.ini"), modules="inicheck") - def test_get_checkers(self): - """ - Tests the get_checkers func in tools - """ - checkers = get_checkers().keys() - valids = ['bool', 'criticaldirectory', 'criticalfilename', 'datetime', - 'datetimeorderedpair', 'directory', 'filename', 'float', - 'int', 'string', 'url'] +@pytest.fixture +def tool_tester(): + return ToolTester() - for v in valids: - assert v in checkers - valids = ["type", "generic", "path"] - checkers = get_checkers(ignore=[]).keys() - for v in valids: - assert v in checkers +def test_get_checkers(tool_tester): + """ + Tests the get_checkers func in tools + """ + checkers = get_checkers().keys() - def test_check_config(self): - """ - Tests the check_config func in tools - """ - ucfg = self.ucfg + valids = ['bool', 'criticaldirectory', 'criticalfilename', 'datetime', + 'datetimeorderedpair', 'directory', 'filename', 'float', + 'int', 'string', 'url'] - warnings, errors = check_config(ucfg) + for v in valids: + assert v in checkers - assert len(errors) == 11 + valids = ["type", "generic", "path"] + checkers = get_checkers(ignore=[]).keys() + for v in valids: + assert v in checkers - def test_cast_all_variables(self): - """ - Tests the cast_all_variables func in tools - """ - ucfg = self.ucfg - ucfg.cfg['topo']['test_start'] = "10-1-2019" - ucfg.cfg['air_temp']['dk_ncores'] = "1.0" - ucfg.cfg['air_temp']['detrend'] = "true" +def 
test_check_config(tool_tester): + """ + Tests the check_config func in tools + """ + ucfg = tool_tester.ucfg - ucfg = cast_all_variables(ucfg, ucfg.mcfg) - results = ['datetime', 'int', 'bool', 'float', 'list', 'str'] + warnings, errors = check_config(ucfg) - tests = [ucfg.cfg['time']['start_date'], - ucfg.cfg['air_temp']['dk_ncores'], - ucfg.cfg['air_temp']['detrend'], - ucfg.cfg['wind']['reduction_factor'], - ucfg.cfg['output']['variables'], - ucfg.cfg['air_temp']['distribution']] + assert len(errors) == 11 - for i, v in enumerate(tests): - assert results[i] in type(v).__name__.lower() - def test_get_user_config(self): - """ - Tests getting the user config - """ +def test_cast_all_variables(tool_tester): + """ + Tests the cast_all_variables func in tools + """ + ucfg = tool_tester.ucfg - base = os.path.dirname(__file__) - path = os.path.join(base, "test_configs/full_config.ini") + ucfg.cfg['topo']['test_start'] = "10-1-2019" + ucfg.cfg['air_temp']['dk_ncores'] = "1.0" + ucfg.cfg['air_temp']['detrend'] = "true" + + ucfg = cast_all_variables(ucfg, ucfg.mcfg) + results = ['datetime', 'int', 'bool', 'float', 'list', 'str'] + + tests = [ucfg.cfg['time']['start_date'], + ucfg.cfg['air_temp']['dk_ncores'], + ucfg.cfg['air_temp']['detrend'], + ucfg.cfg['wind']['reduction_factor'], + ucfg.cfg['output']['variables'], + ucfg.cfg['air_temp']['distribution']] + + for i, v in enumerate(tests): + assert results[i] in type(v).__name__.lower() - # check for the Exception - with pytest.raises(IOError): - get_user_config(path) - with pytest.raises(IOError): - get_user_config('not_a_file.ini') +def test_get_user_config(tool_tester): + """ + Tests getting the user config + """ - ucfg = get_user_config(path, modules='inicheck') - assert ucfg + base = os.path.dirname(__file__) + path = os.path.join(base, "test_configs/full_config.ini") - def test_config_documentation(self): - """ - Confirms that we still make config documentation - """ - # Confirm exception when file doesnt exist - 
with pytest.raises(IOError): - f = '/no/folder/exists/f.rst' - config_documentation(f, modules='inicheck') + # check for the Exception + with pytest.raises(IOError): + get_user_config(path) - # Try it hope it runs - f = 'test.rst' + with pytest.raises(IOError): + get_user_config('not_a_file.ini') + + ucfg = get_user_config(path, modules='inicheck') + assert ucfg + + +def test_config_documentation(tool_tester): + """ + Confirms that we still make config documentation + """ + # Confirm exception when file doesnt exist + with pytest.raises(IOError): + f = '/no/folder/exists/f.rst' config_documentation(f, modules='inicheck') - assert True - # Clean up - os.remove(f) + # Try it hope it runs + f = 'test.rst' + config_documentation(f, modules='inicheck') + assert True + + # Clean up + os.remove(f) From 10b12689e2589e207d6a5beb6b548846801dee2b Mon Sep 17 00:00:00 2001 From: Joachim Meyer Date: Fri, 29 Jan 2021 08:56:45 -0700 Subject: [PATCH 07/21] PR feedback - Suggestion for fixture use. --- tests/conftest.py | 11 ++ tests/test_checkers.py | 375 ++++++++++++++++++++++++----------------- 2 files changed, 236 insertions(+), 150 deletions(-) create mode 100644 tests/conftest.py diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..3d89d20 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,11 @@ +import os +from pathlib import Path + +import pytest + +TEST_ROOT = os.path.dirname(os.path.abspath(__file__)) + + +@pytest.fixture(scope='session', autouse=True) +def test_config_dir(): + return Path(TEST_ROOT).joinpath('test_configs') diff --git a/tests/test_checkers.py b/tests/test_checkers.py index 3131f6d..b915479 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -1,93 +1,93 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -test_checkers ----------------------------------- +import os -Tests for `inicheck.checkers` module. 
-""" - -import inicheck import pytest -from inicheck.checkers import * + +from inicheck import checkers as checkers from inicheck.config import MasterConfig, UserConfig -class CheckerTester(): - def __init__(self): - tests_p = os.path.join(os.path.dirname(inicheck.__file__), '../tests') - self.mcfg = MasterConfig(path=os.path.join(tests_p, - 'test_configs/master.ini')) - - self.ucfg = UserConfig(os.path.join(tests_p, "test_configs/base_cfg.ini"), - mcfg=self.mcfg) - - def run_a_checker(self, valids, invalids, checker, section='basic', - item='item', - extra_config={}): - """ - Runs a loop over all the valids and applies the checker and asserts - theyre true. Same thing is done for the invalids - Args: - valids: List of valid entries to check - invalids: List of invalid entries to check - checker: any class in inicheck.checkers - ucfg: inicheck.config.UserConfig instance (optional) - is_list: is it expected to be a list? - section: section name the item being checked is occurring - item: Item name in the config - extra_config: Pass in contextual config info to test more - complicated checkers. E.g. ordered datetime pair. - """ - - cfg = self.ucfg.cfg - cfg.update({section: {item: " "}}) - - # Added info for testing e.g. ordered datetime pair - if extra_config: - cfg.update(extra_config) - - for z, values in enumerate([valids, invalids]): - for v in values: - - cfg[section][item] = v - b = checker(config=self.ucfg, section=section, item=item) - msgs = b.check() - - if len([True for m in msgs if m is None]) == len(msgs): - valid = True - else: - valid = False - - # Expected valid - if z == 0: - assert valid - else: - assert not valid +def run_a_checker( + user_config, + valid_entries, + invalid_entries, + checker, + section='basic', + item='item', + extra_config=None +): + """ + Runs a loop over all the valid entries and applies the checker and asserts + they are true. Same thing is done for the invalid entries. 
+ Args: + user_config: inicheck.config.UserConfig instance + valid_entries: List of valid entries to check + invalid_entries: List of invalid entries to check + checker: Any class in inicheck.checkers + section: Section name the item being checked is occurring + item: Item name in the config + extra_config: Pass in contextual config info to test more + complicated checkers. E.g. ordered datetime pair. + """ + + config = user_config.cfg + config.update({section: {item: " "}}) + + # Added info for testing e.g. ordered datetime pair + if extra_config: + config.update(extra_config) + + for z, values in enumerate([valid_entries, invalid_entries]): + for v in values: + + config[section][item] = v + b = checker(config=user_config, section=section, item=item) + msgs = b.check() + + if len([True for m in msgs if m is None]) == len(msgs): + valid = True + else: + valid = False + + # Expected valid + if z == 0: + assert valid + else: + assert not valid @pytest.fixture -def check_tester(): +def master_config(test_config_dir): """ - Create some key structures for testing + Master config from test data """ - cls = CheckerTester() - return cls + return MasterConfig( + path=test_config_dir.joinpath('master.ini').as_posix() + ) + +@pytest.fixture +def user_config(test_config_dir, master_config): + return UserConfig( + test_config_dir.joinpath('base_cfg.ini').as_posix(), mcfg=master_config + ) -def test_string(check_tester): + +def test_string(user_config): """ Test we see strings as strings """ + valid_entries = ['test'] - # Confirm we these values are valid - valids = ['test'] - check_tester.run_a_checker(valids, [], CheckString, item='username') + run_a_checker( + user_config, valid_entries, [], checkers.CheckString, item='username' + ) # Confirm that casting a string with uppers will auto produce lowers - check_tester.ucfg.cfg['basic']['username'] = 'Test' + user_config.cfg['basic']['username'] = 'Test' - b = CheckString(config=check_tester.ucfg, section='basic', 
item='username') + b = checkers.CheckString( + config=user_config, section='basic', item='username' + ) result = b.cast() assert result == 'test' @@ -96,166 +96,241 @@ def test_string(check_tester): result = b.cast() assert result == ['test'] - # Check we capture the when alist is passed and were not expecting one + # Check we capture the when a list is passed and were not expecting one b.is_list = False result = b.cast() assert not isinstance(result, list) -def test_bool(check_tester): +def test_bool(user_config): """ Test we see booleans as booleans """ + valid_entries = [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] + invalid_entries = ['Fasle', 'treu'] - # Confirm we these values are valid - valids = [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] - invalids = ['Fasle', 'treu'] - check_tester.run_a_checker(valids, invalids, CheckBool, item='debug') + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckBool, + item='debug' + ) -def test_float(check_tester): +def test_float(user_config): """ Test we see floats as floats """ - valids = [-1.5, '2.5'] - invalids = ['tough'] + valid_entries = [-1.5, '2.5'] + invalid_entries = ['tough'] - check_tester.run_a_checker(valids, invalids, CheckFloat, item='time_out') + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckFloat, + item='time_out' + ) -def test_int(check_tester): +def test_int(user_config): """ Test we see int as ints and not floats """ + valid_entries = [10, '2', 1.0] + invalid_entries = ['tough', '1.5', ''] - # Confirm we these values are valid - valids = [10, '2', 1.0] - invalids = ['tough', '1.5', ''] - check_tester.run_a_checker(valids, invalids, CheckInt, item='num_users') + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckInt, + item='num_users' + ) -def test_datetime(check_tester): +def test_datetime(user_config): """ Test we see datetime as datetime """ + valid_entries = ['2018-01-10 10:10', 
'10-10-2018', "October 10 2018"] + invalid_entries = ['Not-a-date', 'Wednesday 5th'] - valids = ['2018-01-10 10:10', '10-10-2018', "October 10 2018"] - invalids = ['Not-a-date', 'Wednesday 5th'] - check_tester.run_a_checker( - valids, - invalids, - CheckDatetime, - item='start_date') + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckDatetime, + item='start_date' + ) -def test_list(check_tester): +def test_list(user_config): """ Test our listing methods using lists of dates. """ + valid_entries = [ + '10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019'] + ] - valids = ['10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019']] - check_tester.run_a_checker(valids, [], CheckDatetime, item='epochs') + run_a_checker( + user_config, valid_entries, [], checkers.CheckDatetime, item='epochs' + ) -def test_directory(check_tester): +def test_directory(user_config): """ Tests the base class for path based checkers """ + valid_entries = ["./"] + invalid_entries = ['./somecrazy_location!/'] - valids = ["./"] - invalids = ['./somecrazy_location!/'] - check_tester.run_a_checker(valids, invalids, CheckDirectory, item='tmp') + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckDirectory, + item='tmp' + ) # ISSUE #44 check for default when string is empty - check_tester.ucfg.cfg.update({'basic': {'tmp': ''}}) - b = CheckDirectory(config=check_tester.ucfg, section='basic', item='tmp') - value = b.cast() + user_config.cfg.update({'basic': {'tmp': ''}}) + + value = checkers.CheckDirectory( + config=user_config, section='basic', item='tmp' + ).cast() + assert os.path.split(value)[-1] == 'temp' -def test_filename(check_tester): +def test_filename(user_config): """ Tests the base class for path based checkers """ # Remember paths are relative to the config - valids = ["../test_checkers.py"] - invalids = ['dumbfilename'] - check_tester.run_a_checker(valids, invalids, CheckFilename, item='log') + valid_entries = 
["../test_checkers.py"] + invalid_entries = ['dumbfilename'] + + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckFilename, + item='log' + ) + + +def test_filename_empty_string(user_config): + """ + ISSUE #44 check for default when string is empty + """ + user_config.cfg.update({'basic': {'log': ''}}) + user_config.mcfg.cfg['basic']['log'].default = None + + value = checkers.CheckFilename( + config=user_config, section='basic', item='log' + ).cast() - # ISSUE #44 check for default when string is empty - check_tester.ucfg.cfg.update({'basic': {'log': ''}}) - check_tester.ucfg.mcfg.cfg['basic']['log'].default = None - b = CheckFilename(config=check_tester.ucfg, section='basic', item='log') - value = b.cast() assert value is None - # ISSUE #44 check for default when string is empty but default is a - # path - check_tester.ucfg.cfg.update({'basic': {'log': ''}}) - check_tester.ucfg.mcfg.cfg['basic']['log'].default = 'log.txt' - b = CheckFilename(config=check_tester.ucfg, section='basic', item='log') - value = b.cast() + +def test_filename_empty_string_default_path(user_config): + """ + ISSUE #44 check for default when string is empty but default is a path + """ + user_config.cfg.update({'basic': {'log': ''}}) + user_config.mcfg.cfg['basic']['log'].default = 'log.txt' + + value = checkers.CheckFilename( + config=user_config, section='basic', item='log' + ).cast() + assert os.path.split(value)[-1] == 'log.txt' -def test_url(check_tester): +def test_url(user_config): """ Test our url checking. 
""" - valids = ["https://google.com"] - invalids = ["https://micah_subnaught_is_awesome.com"] - check_tester.run_a_checker(valids, invalids, CheckURL, - item='favorite_web_site') + valid_entries = ["https://google.com"] + invalid_entries = ["https://micah_subnaught_is_awesome.com"] + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckURL, + item='favorite_web_site' + ) -def test_datetime_ordered_pairs(check_tester): + +def test_datetime_ordered_pairs(user_config): """ Tests the ordered datetime pair checker which looks for _start _end pairs and confirms they occurs in the correct order. - """ - # Test end dates com after start dates starts = ["1-01-2019", "2019-10-01", "1998-01-14 15:00:00"] ends = ["1-02-2019", "2019-10-02", "1998-01-14 19:00:00"] - invalids_starts = ["01-01-2020", "2020-06-01", "1998-01-14 20:00:00"] - invalids_ends = ["01-01-2018", "2018-10-01", "1998-01-14 10:00:00", ] + invalid_entries_starts = [ + "01-01-2020", "2020-06-01", "1998-01-14 20:00:00" + ] + invalid_entries_ends = [ + "01-01-2018", "2018-10-01", "1998-01-14 10:00:00" + ] # Check for starts being before the end date for start, end, error_start, error_end in zip(starts, ends, - invalids_starts, - invalids_ends): + invalid_entries_starts, + invalid_entries_ends): # Check start values are before end values acfg = {'basic': {'end_date': end}} - check_tester.run_a_checker([start], [error_start], CheckDatetimeOrderedPair, - item="start_date", - extra_config=acfg) + + run_a_checker( + user_config, + [start], + [error_start], + checkers.CheckDatetimeOrderedPair, + item="start_date", + extra_config=acfg + ) # Check start values are before end values acfg = {'basic': {'start_date': start}} - check_tester.run_a_checker([end], [error_end], CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg) + + run_a_checker( + user_config, + [end], + [error_end], + checkers.CheckDatetimeOrderedPair, + item="end_date", + extra_config=acfg + ) # Check start end values 
are equal error acfg = {'basic': {'start_date': '2020-10-01'}} - check_tester.run_a_checker(["2020-10-02"], ["2020-10-01"], - CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg) + run_a_checker( + user_config, + ["2020-10-02"], + ["2020-10-01"], + checkers.CheckDatetimeOrderedPair, + item="end_date", + extra_config=acfg + ) -def test_bounds(check_tester): + +def test_bounds(user_config): """ MasterConfig options now have max and min values to constrain continuous types. This tests whether that works """ - - check_tester.run_a_checker([1.0, 0.0, '0.5'], [1.1, -1.0, '10'], CheckFloat, - item='fraction') - - -if __name__ == '__main__': - import sys - sys.exit(unittest.main()) + run_a_checker( + user_config, + [1.0, 0.0, '0.5'], + [1.1, -1.0, '10'], + checkers.CheckFloat, + item='fraction' + ) From e69d59669b6c1f0ef99a1121cfe4ef4469ef81c9 Mon Sep 17 00:00:00 2001 From: Joachim Meyer Date: Fri, 29 Jan 2021 16:10:55 -0700 Subject: [PATCH 08/21] PyTest - Make use of fixture scopes. Put all test cases into a class to utilize fixture scopes and cache the MasterConfig to be only loaded once for the class run. 
--- tests/test_checkers.py | 472 ++++++++++++++++++++--------------------- 1 file changed, 234 insertions(+), 238 deletions(-) diff --git a/tests/test_checkers.py b/tests/test_checkers.py index b915479..0620584 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -55,282 +55,278 @@ def run_a_checker( assert not valid -@pytest.fixture -def master_config(test_config_dir): - """ - Master config from test data - """ - return MasterConfig( - path=test_config_dir.joinpath('master.ini').as_posix() - ) - - -@pytest.fixture -def user_config(test_config_dir, master_config): - return UserConfig( - test_config_dir.joinpath('base_cfg.ini').as_posix(), mcfg=master_config - ) - - -def test_string(user_config): - """ - Test we see strings as strings - """ - valid_entries = ['test'] - - run_a_checker( - user_config, valid_entries, [], checkers.CheckString, item='username' - ) - - # Confirm that casting a string with uppers will auto produce lowers - user_config.cfg['basic']['username'] = 'Test' - - b = checkers.CheckString( - config=user_config, section='basic', item='username' - ) - result = b.cast() - assert result == 'test' - - # Check we can capture a single item list for strings - b.is_list = True - result = b.cast() - assert result == ['test'] - - # Check we capture the when a list is passed and were not expecting one - b.is_list = False - result = b.cast() - assert not isinstance(result, list) - - -def test_bool(user_config): - """ - Test we see booleans as booleans - """ - valid_entries = [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] - invalid_entries = ['Fasle', 'treu'] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckBool, - item='debug' - ) - - -def test_float(user_config): - """ - Test we see floats as floats - """ - valid_entries = [-1.5, '2.5'] - invalid_entries = ['tough'] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckFloat, - item='time_out' - ) - - -def 
test_int(user_config): - """ - Test we see int as ints and not floats - """ - valid_entries = [10, '2', 1.0] - invalid_entries = ['tough', '1.5', ''] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckInt, - item='num_users' - ) +class TestCheckers: + @pytest.fixture(scope='class') + def master_config(self, test_config_dir): + """ + Master config from test data + """ + return MasterConfig( + path=test_config_dir.joinpath('master.ini').as_posix() + ) + @pytest.fixture + def user_config(self, test_config_dir, master_config): + return UserConfig( + test_config_dir.joinpath('base_cfg.ini').as_posix(), + mcfg=master_config + ) -def test_datetime(user_config): - """ - Test we see datetime as datetime - """ - valid_entries = ['2018-01-10 10:10', '10-10-2018', "October 10 2018"] - invalid_entries = ['Not-a-date', 'Wednesday 5th'] + def test_string(self, user_config): + """ + Test we see strings as strings + """ + valid_entries = ['test'] - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckDatetime, - item='start_date' - ) + run_a_checker( + user_config, + valid_entries, + [], + checkers.CheckString, + item='username' + ) + # Confirm that casting a string with uppers will auto produce lowers + user_config.cfg['basic']['username'] = 'Test' -def test_list(user_config): - """ - Test our listing methods using lists of dates. 
- """ - valid_entries = [ - '10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019'] - ] + b = checkers.CheckString( + config=user_config, section='basic', item='username' + ) + result = b.cast() + assert result == 'test' + + # Check we can capture a single item list for strings + b.is_list = True + result = b.cast() + assert result == ['test'] + + # Check we capture the when a list is passed and were not expecting one + b.is_list = False + result = b.cast() + assert not isinstance(result, list) + + def test_bool(self, user_config): + """ + Test we see booleans as booleans + """ + valid_entries = [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] + invalid_entries = ['Fasle', 'treu'] - run_a_checker( - user_config, valid_entries, [], checkers.CheckDatetime, item='epochs' - ) + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckBool, + item='debug' + ) + def test_float(self, user_config): + """ + Test we see floats as floats + """ + valid_entries = [-1.5, '2.5'] + invalid_entries = ['tough'] -def test_directory(user_config): - """ - Tests the base class for path based checkers - """ - valid_entries = ["./"] - invalid_entries = ['./somecrazy_location!/'] + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckFloat, + item='time_out' + ) - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckDirectory, - item='tmp' - ) + def test_int(self, user_config): + """ + Test we see int as ints and not floats + """ + valid_entries = [10, '2', 1.0] + invalid_entries = ['tough', '1.5', ''] - # ISSUE #44 check for default when string is empty - user_config.cfg.update({'basic': {'tmp': ''}}) + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckInt, + item='num_users' + ) - value = checkers.CheckDirectory( - config=user_config, section='basic', item='tmp' - ).cast() + def test_datetime(self, user_config): + """ + Test we see datetime as datetime + """ + 
valid_entries = ['2018-01-10 10:10', '10-10-2018', "October 10 2018"] + invalid_entries = ['Not-a-date', 'Wednesday 5th'] - assert os.path.split(value)[-1] == 'temp' + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckDatetime, + item='start_date' + ) + def test_list(self, user_config): + """ + Test our listing methods using lists of dates. + """ + valid_entries = [ + '10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019'] + ] -def test_filename(user_config): - """ - Tests the base class for path based checkers - """ - # Remember paths are relative to the config - valid_entries = ["../test_checkers.py"] - invalid_entries = ['dumbfilename'] + run_a_checker( + user_config, + valid_entries, + [], + checkers.CheckDatetime, + item='epochs' + ) - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckFilename, - item='log' - ) + def test_directory(self, user_config): + """ + Tests the base class for path based checkers + """ + valid_entries = ["./"] + invalid_entries = ['./somecrazy_location!/'] + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckDirectory, + item='tmp' + ) -def test_filename_empty_string(user_config): - """ - ISSUE #44 check for default when string is empty - """ - user_config.cfg.update({'basic': {'log': ''}}) - user_config.mcfg.cfg['basic']['log'].default = None + # ISSUE #44 check for default when string is empty + user_config.cfg.update({'basic': {'tmp': ''}}) - value = checkers.CheckFilename( - config=user_config, section='basic', item='log' - ).cast() + value = checkers.CheckDirectory( + config=user_config, section='basic', item='tmp' + ).cast() - assert value is None + assert os.path.split(value)[-1] == 'temp' + def test_filename(self, user_config): + """ + Tests the base class for path based checkers + """ + # Remember paths are relative to the config + valid_entries = ["../test_checkers.py"] + invalid_entries = ['dumbfilename'] -def 
test_filename_empty_string_default_path(user_config): - """ - ISSUE #44 check for default when string is empty but default is a path - """ - user_config.cfg.update({'basic': {'log': ''}}) - user_config.mcfg.cfg['basic']['log'].default = 'log.txt' + run_a_checker( + user_config, + valid_entries, + invalid_entries, + checkers.CheckFilename, + item='log' + ) - value = checkers.CheckFilename( - config=user_config, section='basic', item='log' - ).cast() + def test_filename_empty_string(self, user_config): + """ + ISSUE #44 check for default when string is empty + """ + user_config.cfg.update({'basic': {'log': ''}}) + user_config.mcfg.cfg['basic']['log'].default = None - assert os.path.split(value)[-1] == 'log.txt' + value = checkers.CheckFilename( + config=user_config, section='basic', item='log' + ).cast() + assert value is None -def test_url(user_config): - """ - Test our url checking. - """ - valid_entries = ["https://google.com"] - invalid_entries = ["https://micah_subnaught_is_awesome.com"] + def test_filename_empty_string_default_path(self, user_config): + """ + ISSUE #44 check for default when string is empty but default is a path + """ + user_config.cfg.update({'basic': {'log': ''}}) + user_config.mcfg.cfg['basic']['log'].default = 'log.txt' - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckURL, - item='favorite_web_site' - ) + value = checkers.CheckFilename( + config=user_config, section='basic', item='log' + ).cast() + assert os.path.split(value)[-1] == 'log.txt' -def test_datetime_ordered_pairs(user_config): - """ - Tests the ordered datetime pair checker which looks for _start - _end pairs and confirms they occurs in the correct order. 
- """ - # Test end dates com after start dates - starts = ["1-01-2019", "2019-10-01", "1998-01-14 15:00:00"] - ends = ["1-02-2019", "2019-10-02", "1998-01-14 19:00:00"] - - invalid_entries_starts = [ - "01-01-2020", "2020-06-01", "1998-01-14 20:00:00" - ] - invalid_entries_ends = [ - "01-01-2018", "2018-10-01", "1998-01-14 10:00:00" - ] - - # Check for starts being before the end date - for start, end, error_start, error_end in zip(starts, ends, - invalid_entries_starts, - invalid_entries_ends): - # Check start values are before end values - acfg = {'basic': {'end_date': end}} + def test_url(self, user_config): + """ + Test our url checking. + """ + valid_entries = ["https://google.com"] + invalid_entries = ["https://micah_subnaught_is_awesome.com"] run_a_checker( user_config, - [start], - [error_start], - checkers.CheckDatetimeOrderedPair, - item="start_date", - extra_config=acfg + valid_entries, + invalid_entries, + checkers.CheckURL, + item='favorite_web_site' ) - # Check start values are before end values - acfg = {'basic': {'start_date': start}} + def test_datetime_ordered_pairs(self, user_config): + """ + Tests the ordered datetime pair checker which looks for _start + _end pairs and confirms they occurs in the correct order. 
+ """ + # Test end dates com after start dates + starts = ["1-01-2019", "2019-10-01", "1998-01-14 15:00:00"] + ends = ["1-02-2019", "2019-10-02", "1998-01-14 19:00:00"] + + invalid_entries_starts = [ + "01-01-2020", "2020-06-01", "1998-01-14 20:00:00" + ] + invalid_entries_ends = [ + "01-01-2018", "2018-10-01", "1998-01-14 10:00:00" + ] + + # Check for starts being before the end date + for start, end, error_start, error_end in zip(starts, ends, + invalid_entries_starts, + invalid_entries_ends): + # Check start values are before end values + acfg = {'basic': {'end_date': end}} + + run_a_checker( + user_config, + [start], + [error_start], + checkers.CheckDatetimeOrderedPair, + item="start_date", + extra_config=acfg + ) + + # Check start values are before end values + acfg = {'basic': {'start_date': start}} + + run_a_checker( + user_config, + [end], + [error_end], + checkers.CheckDatetimeOrderedPair, + item="end_date", + extra_config=acfg + ) + + # Check start end values are equal error + acfg = {'basic': {'start_date': '2020-10-01'}} run_a_checker( user_config, - [end], - [error_end], + ["2020-10-02"], + ["2020-10-01"], checkers.CheckDatetimeOrderedPair, item="end_date", extra_config=acfg ) - # Check start end values are equal error - acfg = {'basic': {'start_date': '2020-10-01'}} - - run_a_checker( - user_config, - ["2020-10-02"], - ["2020-10-01"], - checkers.CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg - ) - - -def test_bounds(user_config): - """ - MasterConfig options now have max and min values to constrain continuous - types. This tests whether that works - """ - run_a_checker( - user_config, - [1.0, 0.0, '0.5'], - [1.1, -1.0, '10'], - checkers.CheckFloat, - item='fraction' - ) + def test_bounds(self, user_config): + """ + MasterConfig options now have max and min values to constrain + continuous types. 
This tests whether that works + """ + run_a_checker( + user_config, + [1.0, 0.0, '0.5'], + [1.1, -1.0, '10'], + checkers.CheckFloat, + item='fraction' + ) From 57167bcaf066c18510318e221ac443d189aa5d98 Mon Sep 17 00:00:00 2001 From: Joachim Meyer Date: Fri, 29 Jan 2021 16:30:19 -0700 Subject: [PATCH 09/21] PyTest - Add suggestion for parameterizing tests. --- tests/test_checkers.py | 39 ++++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/tests/test_checkers.py b/tests/test_checkers.py index 0620584..42a531e 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -55,6 +55,14 @@ def run_a_checker( assert not valid +def define_checker(user_config, checker, value, item='item', section='basic'): + config = user_config.cfg + config.update({section: {item: " "}}) + config[section][item] = value + + return checker(config=user_config, section=section, item=item) + + class TestCheckers: @pytest.fixture(scope='class') def master_config(self, test_config_dir): @@ -105,21 +113,38 @@ def test_string(self, user_config): result = b.cast() assert not isinstance(result, list) - def test_bool(self, user_config): + @pytest.mark.parametrize( + 'value', [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] + ) + def test_valid_booleans(self, user_config, value): """ - Test we see booleans as booleans + Test accepted boolean values """ - valid_entries = [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] - invalid_entries = ['Fasle', 'treu'] + checker = define_checker( + user_config, + checkers.CheckBool, + value, + item='debug' + ) - run_a_checker( + assert checker.check()[0] is None + + @pytest.mark.parametrize( + 'value', ['Fasle', 'treu', 'F', 'T'] + ) + def test_invalid_booleans(self, user_config, value): + """ + Test rejected boolean values + """ + checker = define_checker( user_config, - valid_entries, - invalid_entries, checkers.CheckBool, + value, item='debug' ) + assert checker.check()[0] is not None + def 
test_float(self, user_config): """ Test we see floats as floats From c86b4a6e976e3ab3a62d9190d8b4b3af638d6c16 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Wed, 27 Oct 2021 12:53:37 -0600 Subject: [PATCH 10/21] Migrated to a more fixture oriented testing of the checkers. --- tests/test_checkers.py | 505 +++++++++++++++-------------------------- 1 file changed, 179 insertions(+), 326 deletions(-) diff --git a/tests/test_checkers.py b/tests/test_checkers.py index 42a531e..4f1e267 100644 --- a/tests/test_checkers.py +++ b/tests/test_checkers.py @@ -2,70 +2,16 @@ import pytest -from inicheck import checkers as checkers +from inicheck import checkers from inicheck.config import MasterConfig, UserConfig +from .conftest import TEST_ROOT +from os.path import join +class CheckerTestBase: + checker_cls = None -def run_a_checker( - user_config, - valid_entries, - invalid_entries, - checker, - section='basic', - item='item', - extra_config=None -): - """ - Runs a loop over all the valid entries and applies the checker and asserts - they are true. Same thing is done for the invalid entries. - Args: - user_config: inicheck.config.UserConfig instance - valid_entries: List of valid entries to check - invalid_entries: List of invalid entries to check - checker: Any class in inicheck.checkers - section: Section name the item being checked is occurring - item: Item name in the config - extra_config: Pass in contextual config info to test more - complicated checkers. E.g. ordered datetime pair. - """ - - config = user_config.cfg - config.update({section: {item: " "}}) - - # Added info for testing e.g. 
ordered datetime pair - if extra_config: - config.update(extra_config) - - for z, values in enumerate([valid_entries, invalid_entries]): - for v in values: - - config[section][item] = v - b = checker(config=user_config, section=section, item=item) - msgs = b.check() - - if len([True for m in msgs if m is None]) == len(msgs): - valid = True - else: - valid = False - - # Expected valid - if z == 0: - assert valid - else: - assert not valid - - -def define_checker(user_config, checker, value, item='item', section='basic'): - config = user_config.cfg - config.update({section: {item: " "}}) - config[section][item] = value - - return checker(config=user_config, section=section, item=item) - - -class TestCheckers: - @pytest.fixture(scope='class') - def master_config(self, test_config_dir): + @pytest.fixture(scope='function') + def master_config(self, test_config_dir, section, item): """ Master config from test data """ @@ -80,278 +26,185 @@ def user_config(self, test_config_dir, master_config): mcfg=master_config ) - def test_string(self, user_config): - """ - Test we see strings as strings - """ - valid_entries = ['test'] - - run_a_checker( - user_config, - valid_entries, - [], - checkers.CheckString, - item='username' - ) - - # Confirm that casting a string with uppers will auto produce lowers - user_config.cfg['basic']['username'] = 'Test' - - b = checkers.CheckString( - config=user_config, section='basic', item='username' - ) - result = b.cast() - assert result == 'test' - - # Check we can capture a single item list for strings - b.is_list = True - result = b.cast() - assert result == ['test'] - - # Check we capture the when a list is passed and were not expecting one - b.is_list = False - result = b.cast() - assert not isinstance(result, list) - - @pytest.mark.parametrize( - 'value', [True, False, 'true', 'FALSE', 'yes', 'y', 'no', 'n'] - ) - def test_valid_booleans(self, user_config, value): - """ - Test accepted boolean values - """ - checker = define_checker( - 
user_config, - checkers.CheckBool, - value, - item='debug' - ) - - assert checker.check()[0] is None - - @pytest.mark.parametrize( - 'value', ['Fasle', 'treu', 'F', 'T'] - ) - def test_invalid_booleans(self, user_config, value): - """ - Test rejected boolean values - """ - checker = define_checker( - user_config, - checkers.CheckBool, - value, - item='debug' - ) - - assert checker.check()[0] is not None - - def test_float(self, user_config): - """ - Test we see floats as floats - """ - valid_entries = [-1.5, '2.5'] - invalid_entries = ['tough'] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckFloat, - item='time_out' - ) - - def test_int(self, user_config): - """ - Test we see int as ints and not floats - """ - valid_entries = [10, '2', 1.0] - invalid_entries = ['tough', '1.5', ''] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckInt, - item='num_users' - ) - - def test_datetime(self, user_config): - """ - Test we see datetime as datetime - """ - valid_entries = ['2018-01-10 10:10', '10-10-2018', "October 10 2018"] - invalid_entries = ['Not-a-date', 'Wednesday 5th'] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckDatetime, - item='start_date' - ) - - def test_list(self, user_config): - """ - Test our listing methods using lists of dates. 
- """ - valid_entries = [ - '10-10-2019', ['10-10-2019'], ['10-10-2019', '11-10-2019'] - ] - - run_a_checker( - user_config, - valid_entries, - [], - checkers.CheckDatetime, - item='epochs' - ) - - def test_directory(self, user_config): - """ - Tests the base class for path based checkers - """ - valid_entries = ["./"] - invalid_entries = ['./somecrazy_location!/'] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckDirectory, - item='tmp' - ) - - # ISSUE #44 check for default when string is empty - user_config.cfg.update({'basic': {'tmp': ''}}) - - value = checkers.CheckDirectory( - config=user_config, section='basic', item='tmp' - ).cast() - - assert os.path.split(value)[-1] == 'temp' - - def test_filename(self, user_config): - """ - Tests the base class for path based checkers - """ - # Remember paths are relative to the config - valid_entries = ["../test_checkers.py"] - invalid_entries = ['dumbfilename'] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckFilename, - item='log' - ) - - def test_filename_empty_string(self, user_config): - """ - ISSUE #44 check for default when string is empty - """ - user_config.cfg.update({'basic': {'log': ''}}) - user_config.mcfg.cfg['basic']['log'].default = None + @pytest.fixture + def checker(self, user_config, section, item, value, extra_config): + user_config.cfg.update({section: {item: value}}) + + # Added info for testing e.g. 
ordered datetime pair + if extra_config is not None: + user_config.cfg[section].update(extra_config) + return self.checker_cls(config=user_config, section=section, item=item) + + def check_value(self, checker_obj): + """ + Returns whether the value result in any errors or warnings + Args: + checker_obj: + Returns: + Bool: indicating whether an error was returned + """ + result = checker_obj.check() + return result[0] is None + + +class TestCheckString(CheckerTestBase): + checker_cls = checkers.CheckString + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'username', 'test', None, True), + ('basic', 'username', 'Test', None, True), + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + + +class TestCheckBool(CheckerTestBase): + checker_cls = checkers.CheckBool + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'debug', True, None, True), + ('basic', 'debug', True, None, True), + ('basic', 'debug', 'True', None, True), + ('basic', 'debug', 'False', None, True), + ('basic', 'debug', 'yes', None, True), + ('basic', 'debug', 'y', None, True), + ('basic', 'debug', 'no', None, True), + ('basic', 'debug', 'n', None, True), + ('basic', 'debug', 'Fasle', None, False), + ('basic', 'debug', 'treu', None, False), + ('basic', 'debug', 'F', None, False), + ('basic', 'debug', 'T', None, False), + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + +class TestCheckFloat(CheckerTestBase): + checker_cls = checkers.CheckFloat + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'time_out', -1.5, None, True), + ('basic', 'time_out', '2.5', None, True), + ('basic', 'time_out', '2.5', None, True), + ('basic', 'time_out', 2, None, True), + ('basic', 'time_out', 'tough', None, False), + ('basic', 'time_out', '2.a', None, False), + ]) + def 
test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'fraction', 0.0, None, True), + ('basic', 'fraction', 1.0, None, True), + ('basic', 'fraction', '0.5', None, True), + ('basic', 'fraction', 1.5, None, False), + ]) + def test_bounds_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + +class TestCheckInt(CheckerTestBase): + checker_cls = checkers.CheckInt + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'num_users', 10, None, True), + ('basic', 'num_users', 1.0, None, True), + ('basic', 'num_users', '2', None, True), + ('basic', 'num_users', 'tough', None, False), + ('basic', 'num_users', '1.5', None, False), + ('basic', 'num_users', '', None, False), + + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + +class TestCheckDatetime(CheckerTestBase): + checker_cls = checkers.CheckDatetime + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'start_date', '2018-01-10 10:10', None, True), + ('basic', 'start_date', '10-10-2018', None, True), + ('basic', 'start_date', 'October 10 2018', None, True), + ('basic', 'start_date', 'Not-A-date', None, False), + ('basic', 'start_date', 'Wednesday 5th', None, False), + ('basic', 'start_date', '02-31-20', None, False), + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'epochs', ['January 1st 1970'], None, True), + ('basic', 'epochs', ['May 5th 2021', '2-31-2020'], None, True), + ]) + def test_listed_datetime_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + + 
+class TestCheckDirectory(CheckerTestBase): + checker_cls = checkers.CheckDirectory + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'tmp', './', None, True), + ('basic', 'tmp', './some_nonexistent_location', None, False), + ('basic', 'tmp', '', None, False), # ISSUE #44 check for invalid dir (default) when string is empty + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + +class TestCheckFilename(CheckerTestBase): + checker_cls = checkers.CheckFilename + + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'log', join(TEST_ROOT, 'test_checkers.py'), None, True), + ('basic', 'log', './non_existent_file.z', None, False), + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid + + @pytest.mark.parametrize('section, item, default', [ + ('basic', 'log', '/var/log/log.txt'), + ]) + def test_filename_default_on_empty_string(self, user_config, section, item, default): + """ + Check noncritical path in the event an empty string is passed that a default is not None. 
+ * ISSUE #44 check for default when string is empty + """ + # Assign empty string for file path + user_config.cfg.update({section: {item: ''}}) + user_config.mcfg.cfg[section][item].default = default value = checkers.CheckFilename( - config=user_config, section='basic', item='log' + config=user_config, section=section, item=item ).cast() - assert value is None + assert value is default - def test_filename_empty_string_default_path(self, user_config): - """ - ISSUE #44 check for default when string is empty but default is a path - """ - user_config.cfg.update({'basic': {'log': ''}}) - user_config.mcfg.cfg['basic']['log'].default = 'log.txt' - value = checkers.CheckFilename( - config=user_config, section='basic', item='log' - ).cast() +class TestCheckURL(CheckerTestBase): + checker_cls = checkers.CheckURL - assert os.path.split(value)[-1] == 'log.txt' + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + ('basic', 'favorite_web_site', 'https://google.com', None, True), + ('basic', 'favorite_web_site', 'https://micah_subnaught_is_awesome.com"', None, False), + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid - def test_url(self, user_config): - """ - Test our url checking. - """ - valid_entries = ["https://google.com"] - invalid_entries = ["https://micah_subnaught_is_awesome.com"] - - run_a_checker( - user_config, - valid_entries, - invalid_entries, - checkers.CheckURL, - item='favorite_web_site' - ) - def test_datetime_ordered_pairs(self, user_config): - """ - Tests the ordered datetime pair checker which looks for _start - _end pairs and confirms they occurs in the correct order. 
- """ - # Test end dates com after start dates - starts = ["1-01-2019", "2019-10-01", "1998-01-14 15:00:00"] - ends = ["1-02-2019", "2019-10-02", "1998-01-14 19:00:00"] - - invalid_entries_starts = [ - "01-01-2020", "2020-06-01", "1998-01-14 20:00:00" - ] - invalid_entries_ends = [ - "01-01-2018", "2018-10-01", "1998-01-14 10:00:00" - ] - - # Check for starts being before the end date - for start, end, error_start, error_end in zip(starts, ends, - invalid_entries_starts, - invalid_entries_ends): - # Check start values are before end values - acfg = {'basic': {'end_date': end}} - - run_a_checker( - user_config, - [start], - [error_start], - checkers.CheckDatetimeOrderedPair, - item="start_date", - extra_config=acfg - ) - - # Check start values are before end values - acfg = {'basic': {'start_date': start}} - - run_a_checker( - user_config, - [end], - [error_end], - checkers.CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg - ) - - # Check start end values are equal error - acfg = {'basic': {'start_date': '2020-10-01'}} - - run_a_checker( - user_config, - ["2020-10-02"], - ["2020-10-01"], - checkers.CheckDatetimeOrderedPair, - item="end_date", - extra_config=acfg - ) +class TestCheckDatetimeOrderedPair(CheckerTestBase): + checker_cls = checkers.CheckDatetimeOrderedPair - def test_bounds(self, user_config): - """ - MasterConfig options now have max and min values to constrain - continuous types. 
This tests whether that works - """ - run_a_checker( - user_config, - [1.0, 0.0, '0.5'], - [1.1, -1.0, '10'], - checkers.CheckFloat, - item='fraction' - ) + @pytest.mark.parametrize('section, item, value, extra_config, valid', [ + # Test Starts with Ends as extra config + ('basic', 'start_date', '1-01-2019', {'end_date': '1-02-2019'}, True), + ('basic', 'start_date', '2019-10-01', {'end_date': '2019-10-02'}, True), + ('basic', 'start_date', '1998-01-14 15:00:00', {'end_date': '1998-01-14 19:00:00'}, True), + ('basic', 'start_date', '2020-06-01', {'end_date': '2018-10-01'}, False), + ('basic', 'start_date', '1998-01-14 20:00:00', {'end_date': '1998-01-14 10:00:00'}, False), + # Test Ends with Starts as extra config + ('basic', 'end_date', '2005-10-1 20:00:00', {'start_date': '2004-10-01 10:00:00'}, True), + ('basic', 'end_date', '2016-04-01', {'start_date': '2016-05-01'}, False), + # Equal dates is invalid + ('basic', 'end_date', '2010-11-02', {'start_date': '2010-11-02'}, False), + + ]) + def test_check(self, checker, section, item, value, extra_config, valid): + assert self.check_value(checker) == valid From d559e49677ff8d68988a45d58b8428cc467ae5c3 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Wed, 27 Oct 2021 13:00:02 -0600 Subject: [PATCH 11/21] Parametrized utilities tests more --- tests/test_utilities.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 03ef6b5..60a3a51 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -95,8 +95,12 @@ def test_get_relative_to_cfg(): ("tests_my_kw", ['tests', 'my', 'kw'], 3, True), # Finds all 2 in the string ("te_my_kw", ['tests', 'my', 'kw'], 2, True), - # No match + # Test our actual use case in ordered datetime check + ("start_date", ['start'], 1, True), + # No match at all ("te_ym_k", ['tests', 'my', 'kw'], 1, False), + # Test invalid search for 3 kw when only 2 are present + ("te_my_kw", ['tests', 'my', 'kw'], 3, False), 
]) def test_is_kw_matched(value, kw, count, expected): """ @@ -199,3 +203,4 @@ def test_parse_date_fails_int(self): def test_parse_date_fails_with_unknown_string(self): with pytest.raises(TypeError): parse_date("10 F") + From 13a31c01067faafcc4bc600b61f28f8dd036e2d6 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Wed, 27 Oct 2021 14:15:23 -0600 Subject: [PATCH 12/21] Merged issues with imports --- tests/test_cli.py | 2 -- tests/test_utilities.py | 3 +-- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/test_cli.py b/tests/test_cli.py index 25159ca..cd3f252 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -76,8 +76,6 @@ def test_inicheck_details_use(cli_tester): assert s.count("topo") >= 1 -<<<<<<< HEAD - def test_inicheck_changelog_use(cli_tester): """ Test changelog detection output """ diff --git a/tests/test_utilities.py b/tests/test_utilities.py index e85da18..01a9b51 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -7,9 +7,8 @@ Tests for `inicheck.utilities` module. 
""" from os.path import dirname, join - +from datetime import datetime, date import pytest -from inicheck.tools import get_user_config from inicheck.tools import get_user_config, get_inicheck_cmd from inicheck.utilities import parse_date, remove_comment, \ remove_chars, mk_lst, is_valid, is_kw_matched, get_kw_match,\ From b6b222d905cf4c3593578468deae4d46283fd226 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Wed, 27 Oct 2021 14:26:29 -0600 Subject: [PATCH 13/21] Updated the workflow/Makefile to use pytest --- .github/workflows/unittest.yml | 146 ++++++++++++++++----------------- Makefile | 4 +- 2 files changed, 75 insertions(+), 75 deletions(-) diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index 10f8011..e85f13c 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -1,73 +1,73 @@ -name: Unittest, flake8 - -# Run action on pull requests -on: - pull_request: - branches: [master] - -jobs: - flake8: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: "3.7" - - - name: Install dependencies - run: | - python3 -m pip install --upgrade pip - python3 -m pip install flake8 - - - name: Lint with flake8 - run: | - flake8 inicheck - - coverage: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: "3.7" - - - name: Install dependencies - run: | - python3 -m pip install --upgrade pip - python3 -m pip install coverage coveralls PyYAML - python3 -m pip install -r requirements.txt - - - name: Run coverage - run: | - make coverage - coveralls --service=github - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - unittest: - needs: [flake8, coverage] - strategy: - matrix: - os: [ubuntu-latest, macos-latest] - python-version: [3.6, 3.7, 3.8, 3.9] - runs-on: ${{ matrix.os }} - - steps: - - uses: actions/checkout@v2 - - - name: Set up 
Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: | - python3 -m pip install --upgrade pip - python3 -m pip install -r requirements.txt - - - name: Run unittests - run: python3 -m unittest -v +name: Unittest, flake8 + +# Run action on pull requests +on: + pull_request: + branches: [master] + +jobs: + flake8: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: "3.7" + + - name: Install dependencies + run: | + python3 -m pip install --upgrade pip + python3 -m pip install flake8 + + - name: Lint with flake8 + run: | + flake8 inicheck + + coverage: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: "3.7" + + - name: Install dependencies + run: | + python3 -m pip install --upgrade pip + python3 -m pip install coverage coveralls PyYAML + python3 -m pip install -r requirements.txt + + - name: Run coverage + run: | + make coverage + coveralls --service=github + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + unittest: + needs: [flake8, coverage] + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + python-version: [3.6, 3.7, 3.8, 3.9] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python3 -m pip install --upgrade pip + python3 -m pip install -r requirements.txt + + - name: Run unittests + run: python3 -m pytest -v diff --git a/Makefile b/Makefile index 80c8064..ccdc83f 100644 --- a/Makefile +++ b/Makefile @@ -53,8 +53,8 @@ lint: ## check style with isort and pep8 test: ## run tests quickly with the default Python py.test -coverage: ## run coverage and submit 
- coverage run --source inicheck setup.py test +coverage: ## check code coverage quickly with the default Python + coverage run --source inicheck -m pytest coverage report --fail-under=80 coveralls: coverage ## run coveralls From d5cc59dfdbccc695c3ff25274e6654db07501e9d Mon Sep 17 00:00:00 2001 From: micah johnson Date: Wed, 27 Oct 2021 14:29:50 -0600 Subject: [PATCH 14/21] Added pytest reqs to the workflow --- .github/workflows/unittest.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index e85f13c..009dbf4 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -38,7 +38,7 @@ jobs: - name: Install dependencies run: | python3 -m pip install --upgrade pip - python3 -m pip install coverage coveralls PyYAML + python3 -m pip install coverage coveralls PyYAML pytest python3 -m pip install -r requirements.txt - name: Run coverage @@ -68,6 +68,7 @@ jobs: run: | python3 -m pip install --upgrade pip python3 -m pip install -r requirements.txt + python3 -m pip install pytest - name: Run unittests run: python3 -m pytest -v From 744e012574bbc9eda9ced69ffa608796a4c17657 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Thu, 28 Oct 2021 21:58:30 -0600 Subject: [PATCH 15/21] Paremetrize/ more fixture focused cli tests. 
--- tests/conftest.py | 27 ++++++++ tests/test_cli.py | 152 ++++++++++++++++++---------------------------- 2 files changed, 85 insertions(+), 94 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 3d89d20..9bc26fc 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -9,3 +9,30 @@ @pytest.fixture(scope='session', autouse=True) def test_config_dir(): return Path(TEST_ROOT).joinpath('test_configs') + + +@pytest.fixture(scope='session', autouse=True) +def full_config_ini(test_config_dir): + return Path(test_config_dir).joinpath("full_config.ini") + +@pytest.fixture(scope='session', autouse=True) +def base_config_ini(test_config_dir): + return Path(test_config_dir).joinpath("base_cfg.ini") + +@pytest.fixture(scope='session', autouse=True) +def old_smrf_config_ini(test_config_dir): + return Path(test_config_dir).joinpath("old_smrf_config.ini") + + +@pytest.fixture(scope='session', autouse=True) +def changelog_ini(test_config_dir): + return Path(test_config_dir).joinpath("changelog.ini") + + +@pytest.fixture(scope='session', autouse=True) +def master_ini(test_config_dir): + return [os.path.join(test_config_dir, "CoreConfig.ini"), os.path.join(test_config_dir, "recipes.ini")] + +@pytest.fixture(scope='session', autouse=True) +def recipes_ini(test_config_dir): + return Path(test_config_dir).joinpath("recipes.ini") diff --git a/tests/test_cli.py b/tests/test_cli.py index cd3f252..368f351 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -4,114 +4,78 @@ import re import sys -from os.path import abspath, dirname, join +from os.path import join from inicheck.cli import current_version, inicheck_main, inidiff_main - from .test_output import capture_print import pytest -class CLITester(): - def __init__(self): - self.test_base = abspath(join(dirname(__file__), 'test_configs')) - self.master = [ - join(self.test_base, 'recipes.ini'), - join(self.test_base, 'CoreConfig.ini') - ] - self.full_config = join(self.test_base, 'full_config.ini') +class 
TestInicheckCLI: - def capture_with_params(self, **kwargs): + def capture_with_params(self, config_ini, master_ini, **kwargs): return str( capture_print( inicheck_main, - config_file=self.full_config, - master=self.master, + config_file=config_ini, + master=master_ini, **kwargs ) ) - -@pytest.fixture -def cli_tester(): - cls = CLITester() - - return cls - - -def test_basic_inicheck_cli(cli_tester): - """ Test simplest usage of CLI """ - - s = cli_tester.capture_with_params() - - assert s.count("File does not exist") >= 9 - assert s.count("Not a registered option") >= 20 - - -def test_inicheck_recipe_use(cli_tester): - """ Test recipe output """ - - s = cli_tester.capture_with_params(show_recipes=True) - - assert s.count("_recipe") == 20 - - -def test_inicheck_non_defaults_use(cli_tester): - """ Test non-default output""" - - s = cli_tester.capture_with_params(show_non_defaults=True) - - assert s.count("wind") >= 7 - assert s.count("albedo") >= 3 - - -def test_inicheck_details_use(cli_tester): - """ Test details output """ - - s = cli_tester.capture_with_params(details=['topo']) - - assert s.count("topo") >= 4 - - s = cli_tester.capture_with_params(details=['topo', 'basin_lat']) - - assert s.count("topo") >= 1 - -def test_inicheck_changelog_use(cli_tester): - """ Test changelog detection output """ - - old_cfg = join(cli_tester.test_base, 'old_smrf_config.ini') - - s = str(capture_print( - inicheck_main, - config_file=old_cfg, - master=cli_tester.master, - changelog_file=join(cli_tester.test_base, 'changelog.ini') - )) - assert s.count("topo") == 7 - assert s.count("wind") == 12 - assert s.count("stations") == 5 - assert s.count("solar") == 9 - assert s.count("precip") == 18 - assert s.count("air_temp") == 9 - assert s.count("albedo") == 30 - - -def test_inidiff(cli_tester): - """ - Tests if the inidiff script is producing the same information - """ - - configs = [ - join(cli_tester.test_base, 'full_config.ini'), - join(cli_tester.test_base, 'base_cfg.ini') - ] - - 
s = capture_print(inidiff_main, configs, master=cli_tester.master)
-
-    mismatches = s.split("config mismatches:")[-1].strip()
-    assert '117' in mismatches
-
-def test_version(cli_tester):
+    @pytest.mark.parametrize('flags_dict, countable_str, expected_str_count', [
+        ({}, "File does not exist", 9),
+        ({}, "Not a registered option", 23),
+        ({"show_recipes": True}, "_recipe", 20),
+        ({"show_non_defaults": True}, "wind", 35),
+        ({"show_non_defaults": True}, "albedo", 3),
+        ({"details": ['topo']}, "topo", 6),
+        ({"details": ['topo', 'basin_lat']}, "basin", 3),
+    ])
+    def test_cli_output(self, full_config_ini, master_ini, flags_dict, countable_str, expected_str_count):
+        """
+        Collect the console output from the cli and count certain keywords to check if the result is as expected.
+        Args:
+            flags_dict: Kwargs to pass to the cli
+            countable_str: String to search for in the output and count its occurrences
+            expected_str_count: Integer of expected occurrences of the countable_str
+        """
+        s = self.capture_with_params(full_config_ini, master_ini, **flags_dict)
+        assert s.count(countable_str) == expected_str_count
+
+    @pytest.mark.parametrize("countable_str, expected_str_count", [
+        ("topo", 7),
+        ('wind', 12),
+        ('solar', 9),
+        ('precip', 18),
+        ('air_temp', 9),
+        ('albedo', 30)
+    ])
+    def test_inicheck_changelog_use(self, old_smrf_config_ini, master_ini, changelog_ini, countable_str,
+                                    expected_str_count):
+        """ Test changelog detection output """
+        s = str(capture_print(
+            inicheck_main,
+            config_file=old_smrf_config_ini,
+            master=master_ini,
+            changelog_file=changelog_ini
+        ))
+        assert s.count(countable_str) == expected_str_count
+
+
+class TestInidiffCLI():
+
+    def test_inidiff(self, full_config_ini, base_config_ini, master_ini):
+        """
+        Tests if the inidiff script is producing the same information
+        """
+        s = capture_print(inidiff_main, [full_config_ini, base_config_ini], master=master_ini)
+
+        mismatches = s.split("config mismatches:")[-1].strip()
+        assert '117' in
mismatches + + +def test_version(): exception_message = re.search( '(exception|error)', str(current_version()), re.IGNORECASE ) From fdd6c18afc7da37ea894e912944dc609c52611a2 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Fri, 29 Oct 2021 05:29:47 -0600 Subject: [PATCH 16/21] parametrized tests in test_config, Found issue with not importing checker in config.py. Parametrized test_changes --- inicheck/config.py | 3 + tests/conftest.py | 26 ++- tests/test_changes.py | 10 +- tests/test_cli.py | 4 - tests/test_config.py | 438 +++++++++++++++--------------------------- 5 files changed, 178 insertions(+), 303 deletions(-) diff --git a/inicheck/config.py b/inicheck/config.py index 80f07d6..2376593 100644 --- a/inicheck/config.py +++ b/inicheck/config.py @@ -9,6 +9,9 @@ from .iniparse import read_config from .utilities import get_relative_to_cfg, mk_lst +# Unused import required for get_checkers to work. +from . import checkers + DEBUG = False FULL_DEBUG = False diff --git a/tests/conftest.py b/tests/conftest.py index 9bc26fc..78993f4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,9 +1,9 @@ -import os +from os.path import join, dirname, abspath from pathlib import Path import pytest -TEST_ROOT = os.path.dirname(os.path.abspath(__file__)) +TEST_ROOT = dirname(abspath(__file__)) @pytest.fixture(scope='session', autouse=True) @@ -13,26 +13,34 @@ def test_config_dir(): @pytest.fixture(scope='session', autouse=True) def full_config_ini(test_config_dir): - return Path(test_config_dir).joinpath("full_config.ini") + return join(test_config_dir, "full_config.ini") + @pytest.fixture(scope='session', autouse=True) def base_config_ini(test_config_dir): - return Path(test_config_dir).joinpath("base_cfg.ini") + return join(test_config_dir, "base_cfg.ini") + @pytest.fixture(scope='session', autouse=True) def old_smrf_config_ini(test_config_dir): - return Path(test_config_dir).joinpath("old_smrf_config.ini") + return join(test_config_dir, "old_smrf_config.ini") 
@pytest.fixture(scope='session', autouse=True) def changelog_ini(test_config_dir): - return Path(test_config_dir).joinpath("changelog.ini") + return join(test_config_dir, "changelog.ini") @pytest.fixture(scope='session', autouse=True) -def master_ini(test_config_dir): - return [os.path.join(test_config_dir, "CoreConfig.ini"), os.path.join(test_config_dir, "recipes.ini")] +def core_ini(test_config_dir): + return join(test_config_dir, "CoreConfig.ini") + @pytest.fixture(scope='session', autouse=True) def recipes_ini(test_config_dir): - return Path(test_config_dir).joinpath("recipes.ini") + return join(test_config_dir, "recipes.ini") + + +@pytest.fixture(scope='session', autouse=True) +def master_ini(core_ini, recipes_ini): + return [core_ini, recipes_ini] diff --git a/tests/test_changes.py b/tests/test_changes.py index a99e392..9e14f46 100644 --- a/tests/test_changes.py +++ b/tests/test_changes.py @@ -15,17 +15,13 @@ class TestChanges(): - def test_valid_syntax(self): + def test_valid_syntax(self, master_ini, changelog_ini): """ Test we can detect valid syntax """ - base = os.path.dirname(__file__) - master = os.path.join(base, 'test_configs/CoreConfig.ini') - recipes = os.path.join(base, 'test_configs/recipes.ini') - mcfg = MasterConfig(path=[master, recipes]) - cf = os.path.join(base, 'test_configs/changelog.ini') + mcfg = MasterConfig(path=master_ini) try: - c = ChangeLog(paths=cf, mcfg=mcfg) + c = ChangeLog(paths=changelog_ini, mcfg=mcfg) assert True except Exception: assert False diff --git a/tests/test_cli.py b/tests/test_cli.py index 368f351..f0fe226 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -2,10 +2,6 @@ # -*- coding: utf-8 -*- import re -import sys - -from os.path import join - from inicheck.cli import current_version, inicheck_main, inidiff_main from .test_output import capture_print import pytest diff --git a/tests/test_config.py b/tests/test_config.py index 7a74028..ebe465b 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ 
-13,329 +13,201 @@ import pytest from inicheck.config import * from inicheck.tools import cast_all_variables, get_checkers +from tests.conftest import TEST_ROOT, test_config_dir +from os.path import join +class TestUserConfig: + @pytest.fixture(scope='class') + def ucfg(self, full_config_ini): + mcfg = MasterConfig(modules='inicheck') + return UserConfig(full_config_ini, mcfg=mcfg) -def compare_config(generated_config, truth_config, - master=False, mcfg_attr='default'): - """ - Compares config objects for quick tests: - - Args: - generated_config: the config file the inicheck system produced automatically - truth_config: a dictionary made up for checking against - master: boolean for identifying if were checking master config - mcfg_attr: attribute we want to check when using a master config - Returns: - result: boolean describing if cfg are the same. - """ - - # First confirm all keys are available - for s in truth_config.keys(): - if s not in generated_config: - print("\nERROR: test data contains section: {} that was not" - " contained by the generated config.".format(s)) - return False - - for i, v in truth_config[s].items(): - - # Then confirm the item is in the section in the generated config - if i not in generated_config[s].keys(): - print("\nERROR: test data contains item: {} in section: {} that was not" - " contained by the generated config.".format(i, s)) - return False - - # Check the value - else: - # if it is a master config object then use it differently. 
- if master: - gv = getattr(generated_config[s][i], mcfg_attr) - emsg = "\nERROR: config.{} = {} not {}.".format( - mcfg_attr, - gv, - v) - - # Normal config - else: - gv = generated_config[s][i] - emsg = "\nERROR: config[{}][{}] = {} not {}.".format(s, i, gv, - v) - # Confirm values are the same - if gv != v: - # print(emsg) - return False - - return True + @pytest.fixture(scope='class') + def ucfg_w_recipes(self, full_config_ini): + mcfg = MasterConfig(modules='inicheck') + ucfg = UserConfig(full_config_ini, mcfg=mcfg) + ucfg.apply_recipes() + return ucfg -class UserConfigTester(): - def __init__(self): + @pytest.mark.parametrize('important_att', ['mcfg', 'cfg', 'raw_cfg', 'recipes']) + def test_userconfig_attributes(self, ucfg, important_att): """ + Simply confirms the user config has the important attributes that inicheck relies on """ - fname = os.path.abspath( - os.path.dirname(__file__) + - '/test_configs/full_config.ini') - mcfg = MasterConfig(modules='inicheck') - self.ucfg = UserConfig(fname, mcfg=mcfg) - - -@pytest.fixture -def ucfg_tester(): - return UserConfigTester() - - -def test_ucfg_init(ucfg_tester): - """ - Simply opens and looks at the base config BEFORE all the recipes and - stuff has been applied - """ - - # Assert important attributes - for a in ['mcfg', 'cfg', 'raw_cfg', 'recipes']: - assert(hasattr(ucfg_tester.ucfg, a)) + assert (hasattr(ucfg, important_att)) + + @pytest.mark.parametrize("expected_recipe_name", [ + 'topo_basic_recipe', 'time_recipe', 'air_temp_recipe', 'vp_recipe', 'wind_recipe', 'precip_recipe', + 'non_winstral_recipe', 'cloud_factor_recipe', 'albedo_recipe', 'date_decay_method_recipe', 'solar_recipe', + 'thermal_recipe', 'soil_recipe', 'output_recipe', 'system_recipe', 'csv_recipe', 'remove_wind_ninja_recipe', + 'non_grid_local_recipe', 'dk_recipe', 'idw_recipe' + ]) + def test_apply_recipes(self, ucfg_w_recipes, expected_recipe_name): + """ + Tests that the correct recipes were identified to be used for + interpretation 
+ """ + assert expected_recipe_name in [r.name for r in ucfg_w_recipes.recipes] -def test_apply_recipes(ucfg_tester): +class TestRecipeActions: """ - Tests that the correct recipes were identified to be used for - interpretation + This tests the interpretation/actions of the recipes on the config. """ - truth = {'topo': - {'filename': ['None'], - 'type': 'netcdf'}, - 'cloud_factor': - {'distribution': 'idw'} - } - - ucfg_tester.ucfg.apply_recipes() - valid_recipes = ['topo_basic_recipe', 'time_recipe', 'air_temp_recipe', - 'vp_recipe', 'wind_recipe', 'precip_recipe', - 'non_winstral_recipe', 'cloud_factor_recipe', - 'albedo_recipe', 'date_decay_method_recipe', - 'solar_recipe', 'thermal_recipe', 'soil_recipe', - 'output_recipe', 'system_recipe', 'csv_recipe', - 'remove_wind_ninja_recipe', 'non_grid_local_recipe', - 'dk_recipe', 'idw_recipe'] - for v in valid_recipes: - assert v in [r.name for r in ucfg_tester.ucfg.recipes] - - -class RecipeTester(): - - def __init__(self): - self.fname = os.path.abspath(os.path.dirname(__file__) + - '/test_configs/full_config.ini') - - self.mcfg = MasterConfig(modules='inicheck') - self.ucfg = UserConfig(self.fname, mcfg=self.mcfg) + @pytest.fixture(scope='function') + def ucfg(self, full_config_ini, del_sections, mod_cfg): + mcfg = MasterConfig(modules='inicheck') + ucfg = UserConfig(full_config_ini, mcfg=mcfg) - def modify_cfg(self, mod_cfg): - """ - """ + # Delete and section + for s in del_sections: + del ucfg.raw_cfg[s] + # Modify the config for s, v in mod_cfg.items(): - self.ucfg.raw_cfg[s] = v + ucfg.raw_cfg[s] = v - self.ucfg.apply_recipes() + ucfg.apply_recipes() + return ucfg - def check_items(self, section=None, ignore=[]): + def check_items(self, ucfg, section=None, ignore=[]): """ Checks a section for all the items in the master config. 
Args: + ucfg: instantiated UserConfig section: Section to exmaine ignore: list of items to ignore useful for case when not everything is added for certain values """ - checkable = [v for v in self.mcfg.cfg[section].keys() + checkable = [v for v in ucfg.mcfg.cfg[section].keys() if v not in ignore] for v in checkable: - assert v in self.ucfg.cfg[section].keys() + assert v in ucfg.cfg[section].keys() - def check_defaults(self, section=None, ignore=[]): + def check_defaults(self, ucfg, section=None, ignore=[]): """ Checks a section for all the items in the master config. """ - checkable = [v for v in self.mcfg.cfg[section].keys() + checkable = [v for v in ucfg.mcfg.cfg[section].keys() if v not in ignore] for i in checkable: - assert self.ucfg.cfg[section][i] == self.mcfg.cfg[section][i].default - - -@pytest.fixture -def recipe_tester(): - return RecipeTester() - - -def test_apply_defaults(recipe_tester): - """ - Tests the functionality of a recipes ability to add in defaults for - section when a section is not there. - """ - - del recipe_tester.ucfg.raw_cfg['csv'] - - test = {'csv': {'stations': None}} - recipe_tester.modify_cfg(test) - - recipe_tester.check_items(section='csv') - recipe_tester.check_defaults(section='csv') - - -def test_remove_section(recipe_tester): - """ - Tests the functionality of a recipes ability to a section in defaults for - section when a section is not there. 
- """ - - del recipe_tester.ucfg.raw_cfg['csv'] - - # Order matters, since we have conflicting recipes the first one will be - # applied, in this case CSV will beat out gridded - test = {'csv': {'stations': None}, - 'gridded': {}} - - recipe_tester.modify_cfg(test) - - recipe_tester.check_items(section='csv') - assert 'gridded' not in recipe_tester.ucfg.cfg.keys() - - -def test_remove_item_for_a_section(recipe_tester): - """ - Sometimes a recipe will remove an item when a certain section is present - This tests that scenario occurs, uses thermal_distribution_recipe - """ - - test = {'gridded': {'data_type': 'wrf'}} - # The order of recipes matters. Del the csv section to avoid recipes on - # it - del recipe_tester.ucfg.raw_cfg['csv'] - recipe_tester.modify_cfg(test) - - assert 'distribution' not in recipe_tester.ucfg.cfg['thermal'].keys() - - -def test_add_items_for_has_value(recipe_tester): - """ - Recipes have the available keyword has_value to trigger on event where - a section item has a value. This tests that we can trigger on it and - make edits. Test uses the idw_recipe in which if any section is found - with the item distribution set to idw, then we add idw_power and remove - dk_ncores. - """ - - test = {'precip': {'distribution': 'idw', 'dk_ncores': '2'}} - recipe_tester.modify_cfg(test) - - assert 'dk_ncores' not in recipe_tester.ucfg.cfg['precip'].keys() - assert 'idw_power' in recipe_tester.ucfg.cfg['precip'].keys() - - -def test_apply_defaults_for_has_value(recipe_tester): - """ - This recipe applies defaults to items when an item has a certain value - This test uses the krig_recipe in which any item distribution is set to - kriging applies several defautls. 
- """ + assert ucfg.cfg[section][i] == ucfg.mcfg.cfg[section][i].default - test = {'precip': {'distribution': 'kriging', 'dk_ncores': '2'}} - recipe_tester.modify_cfg(test) - assert 'krig_variogram_model' in recipe_tester.ucfg.cfg['precip'].keys() - assert 'dk_ncores' not in recipe_tester.ucfg.cfg['precip'].keys() - - -class MasterConfigTester(): - def __init__(self): + @pytest.mark.parametrize("del_sections, mod_cfg, section_to_check", [ + (['csv'], {'csv': {'stations': None}}, 'csv') + ]) + def test_apply_defaults(self, ucfg, del_sections, mod_cfg, section_to_check): """ - Stage our truthing data here + Tests the functionality of a recipes ability to ADD in defaults for + section when a section is not there. """ - self.truth_defaults = {'topo': - {'type': 'netcdf', - 'filename': ['./common_data/topo/topo.nc']}, - 'air_temp': - {'distribution': 'idw', - 'detrend': 'true', - 'dk_ncores': '1'}} - - -@pytest.fixture -def mcfg_tester(): - return MasterConfigTester() - - -def test_grabbing_mcfg(mcfg_tester): - """ - Builds a master config from the module and paths, check it. - """ - # Build a master config file using multiple files - try: - mcfg = MasterConfig(modules='inicheck') - assert True - except BaseException: - assert False - - base = os.path.dirname(__file__) - master = os.path.join(base, "./test_configs/CoreConfig.ini") - recipes = os.path.join(base, "./test_configs/recipes.ini") - - try: - mcfg = MasterConfig(path=[master, recipes]) - assert True - except BaseException: - assert False - - -def test_add_files(mcfg_tester): - """ - Builds a master config from the files, check it. 
- """
-    # Build a master config file using multiple files
-    base = os.path.dirname(__file__)
-    master = os.path.join(base, "test_configs/CoreConfig.ini")
-    recipes = os.path.join(base, "test_configs/recipes.ini")
-
-    mcfg = MasterConfig(path=master)
-    mcfg.cfg = mcfg.add_files([master, recipes])
-
-    valid_sections = ['topo', 'csv', 'air_temp']
-    for v in valid_sections:
-        assert v in mcfg.cfg.keys()
-
-    assert 'topo_basic_recipe' in [r.name for r in mcfg.recipes]
-
-
-def test_check_types(mcfg_tester):
-    """
-    Checks to make sure we throw the correct error when an unknown data
-    type is requested
-    """
-
-    # Call out a BS entry type to raise the error
-    checkers = get_checkers()
-    valids = ['bool', 'criticaldirectory', 'criticalfilename',
-              'discretionarycriticalfilename', 'datetime',
-              'datetimeorderedpair', 'directory', 'filename', 'float',
-              'int', 'string', 'url']
+        self.check_items(ucfg, section=section_to_check)
+        self.check_defaults(ucfg, section=section_to_check)
+
+    @pytest.mark.parametrize("del_sections, mod_cfg, expected_removed_section", [
+        # Order matters, since we have conflicting recipes the first one will be
+        # applied, in this case CSV will beat out gridded
+        (['csv'], {'csv': {'stations': None}, 'gridded': {}}, 'gridded')
+    ])
+    def test_remove_section(self, ucfg, del_sections, mod_cfg, expected_removed_section):
+        """
+        Tests the functionality of a recipe's ability to REMOVE a section in defaults
+        """
+        assert expected_removed_section not in ucfg.cfg.keys()
+
+    @pytest.mark.parametrize("del_sections, mod_cfg, expected_section, expected_item_removed", [
+        # Test removing an item when a section is present
+        (['csv'], {'gridded': {'data_type': 'wrf'}}, 'thermal', 'distribution'),
+        # Test removing an item when an item has a value
+        ([], {'precip': {'distribution': 'idw', 'dk_ncores': '2'}}, 'precip', 'dk_ncores')
+    ])
+    def test_remove_item_recipe(self, ucfg, del_sections, mod_cfg, expected_section, expected_item_removed):
+        """
+        Recipes can be 
triggered to remove items under certain events. The triggers can be the presence of a section + or an item has a specific value + """ + assert expected_item_removed not in ucfg.cfg[expected_section].keys() - for kw in valids: - line = ["type = {}".format(kw), "description = test"] - cfg = {"section": {"test": ConfigEntry(name='test', - parseable_line=line)}} - assert check_types(cfg, checkers) + @pytest.mark.parametrize("del_sections, mod_cfg, expected_section, expected_item_added", [ + # Test krig_recipe which added the variogram when the distribution is kriging + (['precip'], {'precip': {'distribution': 'kriging'}}, 'precip', 'krig_variogram_model') + ]) + def test_adding_item_recipe(self, ucfg, del_sections, mod_cfg, expected_section, expected_item_added): + """ + This recipe applies defaults to items when an item has a certain value + """ + assert expected_item_added in ucfg.cfg[expected_section].keys() -def test_check_types_exception(mcfg_tester): +class TestMasterConfig(): + @pytest.fixture + def mcfg(self): + return MasterConfig(modules='inicheck') - # Call out a BS entry type to raise the error - checkers = get_checkers() - invalids = ['str', 'filepath', 'criticalfile'] + @pytest.mark.parametrize("mcfg_kwargs", [ + ({"modules":'inicheck'}), # Master config from module + ({"path": join(TEST_ROOT, 'test_configs', 'CoreConfig.ini')}), # Master config from file path + ]) + def test_mcfg_instantiate(self, mcfg_kwargs): + """ + Attempt to instantiate a master config with different kwargs + """ + try: + MasterConfig(**mcfg_kwargs) + assert True + except BaseException: + assert False - for kw in invalids: - line = ["type = {}".format(kw), "description = test"] - cfg = {"section": {"test": ConfigEntry(name='test', - parseable_line=line)}} + def test_exception_on_no_files(self): + """ + This tests that a master config when attempted to instantiate without a module or a path throws an exception + """ with pytest.raises(ValueError): - check_types(cfg, checkers) + 
MasterConfig() + + def test_add_recipe_file(self, core_ini, recipes_ini): + """ + Builds a master config from the files, add a recipe file, check that the recipe name is in the list + """ + # Build a master config file using multiple files + mcfg = MasterConfig(path=core_ini) + mcfg.cfg = mcfg.add_files([recipes_ini]) + assert 'topo_basic_recipe' in [r.name for r in mcfg.recipes] + +# @pytest.mark.parametrize("valid_type") +# def test_check_types(mcfg_tester): +# """ +# Checks to make sure we throw the correct error when an unknown data +# type is requested +# """ +# +# # Call out a BS entry type to raise the error +# checkers = get_checkers() +# valids = ['bool', 'criticaldirectory', 'criticalfilename', +# 'discretionarycriticalfilename', 'datetime', +# 'datetimeorderedpair', 'directory', 'filename', 'float', +# 'int', 'string', 'url'] +# +# for kw in valids: +# line = ["type = {}".format(kw), "description = test"] +# cfg = {"section": {"test": ConfigEntry(name='test', +# parseable_line=line)}} +# assert check_types(cfg, checkers) +# +# +# def test_check_types_exception(mcfg_tester): +# # Call out a BS entry type to raise the error +# checkers = get_checkers() +# invalids = ['str', 'filepath', 'criticalfile'] +# +# for kw in invalids: +# line = ["type = {}".format(kw), "description = test"] +# cfg = {"section": {"test": ConfigEntry(name='test', +# parseable_line=line)}} +# with pytest.raises(ValueError): +# check_types(cfg, checkers) From a8727975fa31387eeb89f84ef2c9358d5259a66c Mon Sep 17 00:00:00 2001 From: micah johnson Date: Fri, 29 Oct 2021 06:23:08 -0600 Subject: [PATCH 17/21] Parametrized entries.py --- tests/test_entries.py | 80 +++++++++++++++---------------------------- 1 file changed, 27 insertions(+), 53 deletions(-) diff --git a/tests/test_entries.py b/tests/test_entries.py index 0e70308..7615754 100644 --- a/tests/test_entries.py +++ b/tests/test_entries.py @@ -7,66 +7,40 @@ Tests for `inicheck.entries` module. 
""" -from collections import OrderedDict - from inicheck.entries import ConfigEntry, RecipeSection, TriggerEntry - +import pytest class TestEntries(): - def test_trigger_entry(self): + @pytest.mark.parametrize('trigger_str_list, expected_conditions_list', [ + (['has_section = test'], [['test', 'any', 'any']]), + (['has_item = test'], [['any', 'test', 'any']]), + (['has_value = [test test test]'], [['test', 'test', 'test']]), + (['has_section = test', 'has_item = test2'], [['test', 'any', 'any'], ['any', 'test2', 'any']]), + (['has_value = [topo type ipw]'], [['topo', 'type', 'ipw']]) + ]) + def test_trigger_entry(self, trigger_str_list, expected_conditions_list): """ - Tests to see if we correctly gather a trigger for a recipe + Triggers define the condition sets for which a recipe is applied. This tests that those are interepretted + correctly. A trigger can be defined by multiple triggers. """ - entries = ['has_section = test', - 'has_item = test', - 'has_value = [test test test]', - ['has_section = test', - 'has_item = test2']] # section trigger - t = TriggerEntry(entries[0]) - assert(t.conditions[0] == ['test', 'any', 'any']) - - # section item - t = TriggerEntry(entries[1]) - assert(t.conditions[0] == ['any', 'test', 'any']) - - t = TriggerEntry(entries[2]) - assert(t.conditions[0] == ['test', 'test', 'test']) - - # Confirm we can handle multiple conditions - t = TriggerEntry(entries[3]) - assert(t.conditions[0] == ['test', 'any', 'any']) - assert(t.conditions[1] == ['any', 'test2', 'any']) - - def test_config_tigger_entry(self): + t = TriggerEntry(trigger_str_list) + assert(t.conditions == expected_conditions_list) + + @pytest.mark.parametrize("entry_str_list, expected_attribute, expected_value", [ + (["default= [swe_z]", "type= string list"], 'default', ['swe_z']), + (["default= 10.5", "type= float"], 'default', '10.5'), + (["min = 2"], 'min', '2'), + # (["allow_none = true"], 'allow_none', 'true'), + # (["allow_none = false"], 'allow_none', False), + 
(["options = [auth guest]"], 'options', ['auth', 'guest']), + (["description = test"], 'description', 'test'), + ]) + def test_config_entry(self, entry_str_list, expected_attribute, expected_value): """ - Tests to see if we correctly gather a trigger for a recipe + This tests defining a master config entry from a string. """ - entries = [['has_value = [topo type ipw]']] - - t = TriggerEntry(entries[0]) - assert(t.conditions[0] == ['topo', 'type', 'ipw']) - - def test_config(self): - """ - User should be able to make a single item a list by requesting it by - with a master config entry like the following - ``` - points_values_property: - default= [swe_z], - type= string list, - description = stuff - ``` - Issue #20 - """ - s = ["default= [swe_z]", - "type= string list", - "description = stuff"] - - e = ConfigEntry(name="points_values_property", parseable_line=s) + e = ConfigEntry(name=None, parseable_line=entry_str_list) + assert getattr(e, expected_attribute) == expected_value - # Assert the defaults a signle item list - assert e.default == ['swe_z'] - assert e.type == 'string' - assert e.listed == True From 74a62a2272b5c136d1a1d2ff7adfc87b10424462 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Fri, 29 Oct 2021 06:31:41 -0600 Subject: [PATCH 18/21] Found a small bug in entries on allow none interpretation. Fixed and added associated tests. 
Fixed #63 --- inicheck/entries.py | 4 ++++ tests/test_entries.py | 13 ++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/inicheck/entries.py b/inicheck/entries.py index 5d1fb46..f891390 100755 --- a/inicheck/entries.py +++ b/inicheck/entries.py @@ -187,3 +187,7 @@ def __init__(self, name=None, parseable_line=None): # Allow none should always be a bool if str(self.allow_none).lower() == 'false': self.allow_none = False + elif str(self.allow_none).lower() == 'true': + self.allow_none = True + else: + raise ValueError('Unrecognized allow_none in config entry named {}'.format(self.name)) diff --git a/tests/test_entries.py b/tests/test_entries.py index 7615754..423767a 100644 --- a/tests/test_entries.py +++ b/tests/test_entries.py @@ -29,11 +29,12 @@ def test_trigger_entry(self, trigger_str_list, expected_conditions_list): assert(t.conditions == expected_conditions_list) @pytest.mark.parametrize("entry_str_list, expected_attribute, expected_value", [ - (["default= [swe_z]", "type= string list"], 'default', ['swe_z']), + (["default= [swe_z]", "type= string list"], 'default', ['swe_z']), # Test the list keyword in the type + (["default= [swe_z]", "type= string listed"], 'default', ['swe_z']), # Test the other list keyword in type (["default= 10.5", "type= float"], 'default', '10.5'), (["min = 2"], 'min', '2'), - # (["allow_none = true"], 'allow_none', 'true'), - # (["allow_none = false"], 'allow_none', False), + (["allow_none = true"], 'allow_none', True), + (["allow_none = false"], 'allow_none', False), (["options = [auth guest]"], 'options', ['auth', 'guest']), (["description = test"], 'description', 'test'), ]) @@ -44,3 +45,9 @@ def test_config_entry(self, entry_str_list, expected_attribute, expected_value): e = ConfigEntry(name=None, parseable_line=entry_str_list) assert getattr(e, expected_attribute) == expected_value + def test_config_entry_allow_none_exception(self): + """ + Test that an invalid string bool raises an exception + """ + with 
pytest.raises(ValueError): + e = ConfigEntry(name=None, parseable_line=['allow_none = ture']) From f8f12e347ae79b4b22c872b8794a48a415d84a03 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Fri, 29 Oct 2021 13:55:32 -0600 Subject: [PATCH 19/21] Added more fixtures that get used all over. Specifically the paths to master and full configs. Also parametrized more tests and expanded some. Working on #58 --- tests/conftest.py | 5 ++ tests/test_output.py | 149 ++++++++++++++++++++-------------------- tests/test_tools.py | 120 ++++++++++++++------------------ tests/test_utilities.py | 32 ++++----- 4 files changed, 143 insertions(+), 163 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 78993f4..b56adbf 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,6 @@ from os.path import join, dirname, abspath from pathlib import Path +from inicheck.tools import get_user_config import pytest @@ -44,3 +45,7 @@ def recipes_ini(test_config_dir): @pytest.fixture(scope='session', autouse=True) def master_ini(core_ini, recipes_ini): return [core_ini, recipes_ini] + +@pytest.fixture(scope='session', autouse=True) +def full_ucfg(full_config_ini, master_ini): + return get_user_config(full_config_ini, master_files=master_ini) diff --git a/tests/test_output.py b/tests/test_output.py index 20800c1..fc10ca0 100644 --- a/tests/test_output.py +++ b/tests/test_output.py @@ -43,77 +43,78 @@ def capture_print(function_call, *args, **kwargs): return out -class OutputTester(): - - def __init__(self): - base = os.path.dirname(__file__) - self.ucfg = get_user_config(os.path.join(base, "test_configs/full_config.ini"), - modules="inicheck") - - -@pytest.fixture -def output_tester(): - return OutputTester() - - -def test_generate_config(output_tester): - """ - Tests if we generate a config to a file - """ - generate_config(output_tester.ucfg, 'out_config.ini', cli=False) - - with open('out_config.ini') as fp: - lines = fp.readlines() - fp.close() - - # Assert a header is 
written - assert 'Configuration' in lines[1] - - key_count = 0 - - # Assert all the sections are written - for k in output_tester.ucfg.cfg.keys(): - for l in lines: - if k in l: - key_count += 1 - break - - assert key_count == len(output_tester.ucfg.cfg.keys()) - - -def test_print_recipe_summary(output_tester): - """ - Checks that the output produces 366 lines of recipe info - """ - lst_recipes = output_tester.ucfg.mcfg.recipes - out = capture_print(print_recipe_summary, lst_recipes) - - assert len(out.split('\n')) == 366 - assert out.count('recipe') == 34 - - -def test_print_details(output_tester): - """ - Tests the function for printting help on the master config - """ - # Test for a whole section - details = ['air_temp'] - out = capture_print(print_details, details, output_tester.ucfg.mcfg.cfg) - - assert out.count('air_temp') == len( - output_tester.ucfg.mcfg.cfg[details[0]]) - - # test for a section and item - details = ['precip', 'distribution'] - out = capture_print(print_details, details, output_tester.ucfg.mcfg.cfg) - assert out.count('precip ') == 1 - - -def test_non_default_print(output_tester): - """ - Tests if printing the non-defaults is working - """ - out = capture_print(print_non_defaults, output_tester.ucfg) - - # Check that we have 27 lines of info for non-defaults - assert len(out.split('\n')) == 27 +class TestOutput: + + @pytest.fixture() + def ucfg(self, full_config_ini): + return get_user_config(full_config_ini, modules="inicheck") + + def test_generate_config_header(self, ucfg): + """ + Tests if we generate a config and dump it to a file with a header + """ + generate_config(ucfg, 'out_config.ini', cli=False) + + with open('out_config.ini') as fp: + lines = fp.readlines() + fp.close() + + # Assert a header is written + assert 'Configuration' in lines[1] + + def test_generate_config_sections(self, ucfg): + """ + Tests if we generate a config and dump it to a file and all + the sections are written to the file + """ + generate_config(ucfg, 
'out_config.ini', cli=False)
+
+        with open('out_config.ini') as fp:
+            lines = fp.readlines()
+            fp.close()
+
+        key_count = 0
+
+        # Assert all the sections are written
+        for k in ucfg.cfg.keys():
+            for line in lines:
+                if k in line:
+                    key_count += 1
+                    break
+
+        assert key_count == len(ucfg.cfg.keys())
+
+    @pytest.mark.parametrize('keyword, expected_count', [
+        ('\n', 365),  # Test 365 line returns are produced
+        ('recipe', 34)  # Test 34 recipes are printed out
+    ])
+    def test_print_recipe_summary(self, ucfg, keyword, expected_count):
+        """
+        Checks that print_recipe_summary produces a specific count of a keyword in the output
+        """
+        lst_recipes = ucfg.mcfg.recipes
+        out = capture_print(print_recipe_summary, lst_recipes)
+        assert out.count(keyword) == expected_count
+
+    @pytest.mark.parametrize('details_list, keyword, expected_count', [
+        (['air_temp'], 'air_temp', 18),  # test details on one section, should match the number of items in the section
+        (['precip', 'distribution'], 'distribution', 1),  # test details output on an item
+    ])
+    def test_print_details(self, ucfg, details_list, keyword, expected_count):
+        """
+        Tests the function for printing help on the master config
+        """
+        # Test for a whole section
+        out = capture_print(print_details, details_list, ucfg.mcfg.cfg)
+
+        assert out.count(keyword) == expected_count
+
+
+    def test_non_default_print(self, ucfg):
+        """
+        Tests if printing the non-defaults is working
+        """
+        out = capture_print(print_non_defaults, ucfg)
+
+        # Check that we have 27 lines of info for non-defaults
+        assert len(out.split('\n')) == 27
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 49493b9..06e77cb 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -11,95 +11,64 @@ import pytest
 
 from inicheck.tools import *
-
-
-class ToolTester():
-    def __init__(self):
-        base = os.path.dirname(__file__)
-        self.ucfg = get_user_config(os.path.join(base,
-                                                 "test_configs/full_config.ini"),
-                                    modules="inicheck")
-
-
-@pytest.fixture
-def tool_tester(): - return ToolTester() - - -def test_get_checkers(tool_tester): +from datetime import datetime +from os.path import join +from .conftest import TEST_ROOT + +@pytest.mark.parametrize('fn_kwargs, expected_types', [ + ({}, ['bool', 'criticaldirectory', 'criticalfilename', 'datetime', 'datetimeorderedpair', + 'directory', 'filename', 'float', 'int', 'string', 'url']), + ({'ignore': []}, ["type", "generic", "path"]), + # Check the keyword is removed from the name + ({'keywords': 'checkdate'}, ["time"]), + # Confirm get_checkers can grab any class from any module using keywords and modules + ({'module': 'inicheck.config', 'keywords': 'config'}, ["user", 'master']), + +]) +def test_get_checkers(fn_kwargs, expected_types): """ Tests the get_checkers func in tools """ - checkers = get_checkers().keys() - - valids = ['bool', 'criticaldirectory', 'criticalfilename', 'datetime', - 'datetimeorderedpair', 'directory', 'filename', 'float', - 'int', 'string', 'url'] - - for v in valids: - assert v in checkers - - valids = ["type", "generic", "path"] - checkers = get_checkers(ignore=[]).keys() - for v in valids: + checkers = get_checkers(**fn_kwargs).keys() + for v in expected_types: assert v in checkers -def test_check_config(tool_tester): +def test_check_config(full_ucfg): """ Tests the check_config func in tools """ - ucfg = tool_tester.ucfg - - warnings, errors = check_config(ucfg) - + warnings, errors = check_config(full_ucfg) assert len(errors) == 11 - -def test_cast_all_variables(tool_tester): +@pytest.mark.parametrize("section, item, str_value, expected_type", [ + ('time', 'start_date', "10-1-2019", datetime), + ('air_temp', 'dk_ncores', "1.0", int), + ('air_temp', 'detrend', "true", bool), + ('wind', 'reduction_factor', "0.2", float), + ('precip', 'distribution', "dk", str), + ('output', 'variables', "dk", list), +]) +def test_cast_all_variables(full_config_ini, core_ini, section, item, str_value, expected_type): """ Tests the cast_all_variables func in 
tools """ - ucfg = tool_tester.ucfg - - ucfg.cfg['topo']['test_start'] = "10-1-2019" - ucfg.cfg['air_temp']['dk_ncores'] = "1.0" - ucfg.cfg['air_temp']['detrend'] = "true" - + ucfg = get_user_config(full_config_ini, master_files=core_ini) + ucfg.cfg = {section: {item: str_value}} ucfg = cast_all_variables(ucfg, ucfg.mcfg) - results = ['datetime', 'int', 'bool', 'float', 'list', 'str'] - - tests = [ucfg.cfg['time']['start_date'], - ucfg.cfg['air_temp']['dk_ncores'], - ucfg.cfg['air_temp']['detrend'], - ucfg.cfg['wind']['reduction_factor'], - ucfg.cfg['output']['variables'], - ucfg.cfg['air_temp']['distribution']] + assert type(ucfg.cfg[section][item]) == expected_type - for i, v in enumerate(tests): - assert results[i] in type(v).__name__.lower() - -def test_get_user_config(tool_tester): +def test_get_user_config_exception(): """ Tests getting the user config """ - - base = os.path.dirname(__file__) - path = os.path.join(base, "test_configs/full_config.ini") - # check for the Exception - with pytest.raises(IOError): - get_user_config(path) - with pytest.raises(IOError): get_user_config('not_a_file.ini') - ucfg = get_user_config(path, modules='inicheck') - assert ucfg - -def test_config_documentation(tool_tester): +def test_config_documentation_error(): """ Confirms that we still make config documentation """ @@ -108,10 +77,23 @@ def test_config_documentation(tool_tester): f = '/no/folder/exists/f.rst' config_documentation(f, modules='inicheck') - # Try it hope it runs +@pytest.fixture() +def documentation_f(): f = 'test.rst' - config_documentation(f, modules='inicheck') - assert True - - # Clean up + yield f os.remove(f) +@pytest.mark.parametrize("fn_kwargs", [ + {"modules": 'inicheck'}, + {"paths": [join(TEST_ROOT, 'test_configs', 'CoreConfig.ini')]} +]) +def test_config_documentation(documentation_f, fn_kwargs): + """ + Test the auto documentation function for the config file using different methods of + supplying a master config + """ + 
config_documentation(documentation_f, **fn_kwargs) + + with open(documentation_f) as fp: + lines = fp.readlines() + assert len(lines) == 1483 + diff --git a/tests/test_utilities.py b/tests/test_utilities.py index 01a9b51..08a6ef0 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -57,35 +57,29 @@ def test_remove_chars(): assert ' ' not in out -def test_find_options_in_recipes(): +@pytest.mark.parametrize("expected_options", [ + (['gridded', 'csv', 'mysql']) +]) +def test_find_options_in_recipes(full_ucfg, expected_options): """ Tests utilities.find_options_in_recipes which extracts choices being made by looking at the recipes and determining which work on each other such that they don't exist at the same time. Used in the inimake cli """ - base = dirname(__file__) - ucfg = get_user_config(join(base, "test_configs", "full_config.ini"), - modules='inicheck') - mcfg = ucfg.mcfg + mcfg = full_ucfg.mcfg choices = find_options_in_recipes(mcfg.recipes, mcfg.cfg.keys(), "remove_section") - - # Currently there is 3 sections that are set as optional in the recipes - for opt in ['gridded', 'mysql', 'csv']: - assert opt in choices[0] + assert sorted(expected_options) == sorted(choices[0]) -def test_get_relative_to_cfg(): +def test_get_relative_to_cfg(full_ucfg): """ Tests that all paths in a config can be made relative to the config """ - base = dirname(__file__) - ucfg = get_user_config(join(base, "test_configs", "full_config.ini"), - modules='inicheck') - mcfg = ucfg.mcfg + mcfg = full_ucfg.mcfg choices = find_options_in_recipes(mcfg.recipes, mcfg.cfg.keys(), "remove_section") - p = get_relative_to_cfg(__file__, ucfg.filename) + p = get_relative_to_cfg(__file__, full_ucfg.filename) assert p == '../test_utilities.py' @@ -147,18 +141,16 @@ def test_is_valid(value, dtype, allow_none, expected): assert result[1] == expected[1] -def test_get_inicheck_cmd(): +def test_get_inicheck_cmd(full_config_ini): """ Test if the cmd used to generate the str command is 
working """ - base = dirname(__file__) - fname = join(base, "test_configs", "full_config.ini") cmd = get_inicheck_cmd( - fname, + full_config_ini, modules='inicheck', master_files=None) - assert cmd == 'inicheck -f {} -m inicheck'.format(fname) + assert cmd == 'inicheck -f {} -m inicheck'.format(full_config_ini) class TestUtilitiesDateParse(): From 9461ca6bf5d5d744527ce19d027f3b2de03c2447 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Fri, 29 Oct 2021 13:58:59 -0600 Subject: [PATCH 20/21] Importing inicheck checkers into config to avoid issues with get_checker. I had to rename the import since there are local variables named checkers as well. --- inicheck/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inicheck/config.py b/inicheck/config.py index 2376593..6af75b4 100644 --- a/inicheck/config.py +++ b/inicheck/config.py @@ -10,7 +10,7 @@ from .utilities import get_relative_to_cfg, mk_lst # Unused import required for get_checkers to work. -from . import checkers +from . import checkers as checkers_module DEBUG = False FULL_DEBUG = False From 1c1e3c63b41920834b4bbdfe306c402edb1b0351 Mon Sep 17 00:00:00 2001 From: micah johnson Date: Fri, 29 Oct 2021 22:10:16 -0600 Subject: [PATCH 21/21] flake8 fixes to recent edits --- inicheck/config.py | 2 +- inicheck/entries.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/inicheck/config.py b/inicheck/config.py index 6af75b4..2422d9c 100644 --- a/inicheck/config.py +++ b/inicheck/config.py @@ -10,7 +10,7 @@ from .utilities import get_relative_to_cfg, mk_lst # Unused import required for get_checkers to work. -from . import checkers as checkers_module +from . 
import checkers as checkers_module # noqa DEBUG = False FULL_DEBUG = False diff --git a/inicheck/entries.py b/inicheck/entries.py index f891390..15e9906 100755 --- a/inicheck/entries.py +++ b/inicheck/entries.py @@ -190,4 +190,5 @@ def __init__(self, name=None, parseable_line=None): elif str(self.allow_none).lower() == 'true': self.allow_none = True else: - raise ValueError('Unrecognized allow_none in config entry named {}'.format(self.name)) + raise ValueError('Unrecognized allow_none in config entry named {}' + ''.format(self.name))