Commit

Merge branch 'develop' into upstream/regex
jcampbell committed May 31, 2019
2 parents c8dc221 + 6022c77 commit 7e3e72d
Showing 2 changed files with 37 additions and 39 deletions.
75 changes: 37 additions & 38 deletions tests/test_definitions/test_expectations.py
@@ -18,56 +18,52 @@ def pytest_generate_tests(metafunc):
     # Load all the JSON files in the directory
     dir_path = os.path.dirname(os.path.realpath(__file__))
     expectation_dirs = [dir_ for dir_ in os.listdir(dir_path) if os.path.isdir(os.path.join(dir_path, dir_))]
 
     parametrized_tests = []
     ids = []
 
     for expectation_category in expectation_dirs:
 
         test_configuration_files = glob.glob(dir_path+'/' + expectation_category + '/*.json')
         for c in CONTEXTS:
             for filename in test_configuration_files:
                 file = open(filename)
                 test_configuration = json.load(file)
 
-                if candidate_test_is_on_temporary_notimplemented_list(c, test_configuration["expectation_type"]):
-                    logger.debug("Skipping generation of tests for expectation " + test_configuration["expectation_type"] +
-                                 " and context " + c)
-                else:
-                    for d in test_configuration['datasets']:
+                for d in test_configuration['datasets']:
+                    skip = False
+                    # Pass the test if we are in a test condition that is a known exception
+                    if candidate_test_is_on_temporary_notimplemented_list(c, test_configuration["expectation_type"]):
+                        skip = True
+
+                    if skip:
+                        schemas = data_asset = None
+                    else:
                         schemas = d["schemas"] if "schemas" in d else None
                         data_asset = get_dataset(c, d["data"], schemas=schemas)
 
-                        for test in d["tests"]:
-                            # Pass the test if we are in a test condition that is a known exception
-
-                            # Don't generate tests based on certain configurations
-
-                            # Known condition: SqlAlchemy does not support allow_cross_type_comparisons
-                            if 'allow_cross_type_comparisons' in test['in'] and isinstance(data_asset, SqlAlchemyDataset):
-                                continue
-
-                            if 'suppress_test_for' in test:
-                                # Optionally suppress the test for specified DataAsset types
-                                if 'SQLAlchemy' in test['suppress_test_for'] and isinstance(data_asset, SqlAlchemyDataset):
-                                    continue
-                                if 'sqlite' in test['suppress_test_for'] and isinstance(data_asset, SqlAlchemyDataset) and isinstance(data_asset.engine.dialect, sqliteDialect):
-                                    continue
-                                if 'postgresql' in test['suppress_test_for'] and isinstance(data_asset, SqlAlchemyDataset) and isinstance(data_asset.engine.dialect, postgresqlDialect):
-                                    continue
-                                if 'Pandas' in test['suppress_test_for'] and isinstance(data_asset, PandasDataset):
-                                    continue
-                                if 'Spark' in test['suppress_test_for'] and isinstance(data_asset, SparkDFDataset):
-                                    continue
-
-                            parametrized_tests.append({
-                                "expectation_type": test_configuration["expectation_type"],
-                                "dataset": data_asset,
-                                "test": test,
-                            })
-
-                            ids.append(expectation_category + "/" +
-                                       c+":"+test_configuration["expectation_type"]+":"+test["title"])
+                    for test in d["tests"]:
+                        if 'suppress_test_for' in test and (
+                                'SQLAlchemy' in test['suppress_test_for'] and isinstance(data_asset, SqlAlchemyDataset)
+                                or 'sqlite' in test['suppress_test_for'] and isinstance(data_asset, SqlAlchemyDataset) and isinstance(data_asset.engine.dialect, sqliteDialect)
+                                or 'postgresql' in test['suppress_test_for'] and isinstance(data_asset, SqlAlchemyDataset) and isinstance(data_asset.engine.dialect, postgresqlDialect)
+                                or 'Pandas' in test['suppress_test_for'] and isinstance(data_asset, PandasDataset)
+                                or 'Spark' in test['suppress_test_for'] and isinstance(data_asset, SparkDFDataset)
+                        ):
+                            skip = True
+                        # Known condition: SqlAlchemy does not support allow_cross_type_comparisons
+                        if 'allow_cross_type_comparisons' in test['in'] and isinstance(data_asset, SqlAlchemyDataset):
+                            skip = True
+
+                        parametrized_tests.append({
+                            "expectation_type": test_configuration["expectation_type"],
+                            "dataset": data_asset,
+                            "test": test,
+                            "skip": skip,
+                        })
+
+                        ids.append(expectation_category + "/" +
+                                   c+":"+test_configuration["expectation_type"]+":"+test["title"])
 
     metafunc.parametrize(
         "test_case",
@@ -77,6 +73,9 @@ def pytest_generate_tests(metafunc):
 
 
 def test_case_runner(test_case):
+    if test_case['skip']:
+        pytest.skip()
+
     # Note: this should never be done in practice, but we are wiping expectations to reuse datasets during testing.
     test_case["dataset"]._initialize_expectations()
 
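Editor's note: the net effect of this file's changes is to move test filtering from collection time to run time. Every JSON-defined case is still parametrized, but cases that hit a temporarily-not-implemented backend or a matching suppress_test_for entry now carry skip=True and are reported as skipped by test_case_runner, instead of never being generated. (In the combined suppress_test_for condition, Python's and binds tighter than or, so each backend clause is evaluated independently.) Below is a minimal, self-contained sketch of that pattern; the CASES data and names are hypothetical stand-ins for the project's JSON loader, not its actual code.

import pytest

# Hypothetical stand-in for test cases loaded from JSON definition files.
CASES = [
    {"expectation_type": "expect_column_values_to_match_regex", "test": {"title": "basic"}, "skip": False},
    {"expectation_type": "expect_column_values_to_match_regex", "test": {"title": "cross_type"}, "skip": True},
]


def pytest_generate_tests(metafunc):
    # Parametrize every case, including the ones already known to need skipping.
    if "test_case" in metafunc.fixturenames:
        ids = [case["expectation_type"] + ":" + case["test"]["title"] for case in CASES]
        metafunc.parametrize("test_case", CASES, ids=ids)


def test_case_runner(test_case):
    if test_case["skip"]:
        # Skipped cases show up in the pytest report rather than silently disappearing.
        pytest.skip()
    assert test_case["expectation_type"].startswith("expect_")

Run with pytest -v, the second case is reported as SKIPPED rather than being absent from collection, which is the visibility the skip flag buys.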
1 change: 0 additions & 1 deletion tests/test_utils.py
@@ -340,7 +340,6 @@ def evaluate_json_test(data_asset, expectation_type, test):
     # Check results
     if test['exact_match_out'] is True:
         assert test['out'] == result
-
     else:
         for key, value in test['out'].items():
             # Apply our great expectations-specific test logic
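Editor's note: the change to tests/test_utils.py only removes a blank line, but the surrounding code is the comparison step of evaluate_json_test: with exact_match_out set, the whole result must equal test['out']; otherwise only the keys listed in test['out'] are compared, with additional Great Expectations-specific logic per key. A hedged sketch of that flow, using a hand-rolled result dict and a simplified per-key check rather than the library's real logic:

def check_result(result, test):
    # Sketch of the comparison branch shown in the hunk above (simplified).
    if test['exact_match_out'] is True:
        # The entire expectation result must match the expected output.
        assert test['out'] == result
    else:
        # Only the keys explicitly listed in 'out' are compared.
        for key, value in test['out'].items():
            assert key in result
            assert result[key] == value


# Example: a loose match that only checks the 'success' field.
check_result(
    result={"success": True, "unexpected_list": []},
    test={"exact_match_out": False, "out": {"success": True}},
)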
