Commit

Pulled out some OBE comments
talagluck committed Aug 1, 2019
1 parent 7d0f0c2 commit efd08e1
Showing 1 changed file with 19 additions and 54 deletions.
73 changes: 19 additions & 54 deletions tests/render/test_render_BulletListContentBlock.py
@@ -25,22 +25,17 @@ def test_substitute_none_for_missing():
assert my_kwargs == {"a": 1, "b": 2}, \
"substitute_none_for_missing should not change input kwargs in place."


# Commenting out the test below. It is helpful during development, but is not a high confidence acceptance test.

@pytest.mark.smoketest
def test_all_expectations_using_test_definitions():
# Fetch test_definitions for all expectations.
# Note: as of 6/20/2019, coverage is good, but not 100%
test_files = glob.glob(
"tests/test_definitions/*/expect*.json"
)

all_true = True
failure_count, total_count = 0, 0
types = []
# Loop over all test_files, datasets, and tests:

# Loop over all test_files, datasets, and tests:
test_results = {}
for filename in test_files:
test_definitions = json.load(open(filename))
@@ -61,53 +56,23 @@ def test_all_expectations_using_test_definitions():
# This would be a good place to put a kwarg-to-arg converter
continue

try:
# Attempt to render it
render_result = PrescriptiveBulletListContentBlockRenderer.render(
[fake_expectation])
# print(fake_expectation)
# Assert that the rendered result matches the intended format
# Note: THIS DOES NOT TEST CONTENT AT ALL.
# Abe 6/22/2019: For the moment, I think it's fine to not test content.
# I'm on the fence about the right end state for testing renderers at this level.
# Spot checks, perhaps?
assert isinstance(render_result, dict)
assert "content_block_type" in render_result
assert render_result["content_block_type"] in render_result
assert isinstance(render_result[render_result["content_block_type"]], list )

# TODO: Assert that the template is renderable, with all the right arguments, etc.
# rendered_template = pTemplate(el["template"]).substitute(el["params"])

test_results[test_definitions["expectation_type"]].append({
test["title"]:render_result,
# "rendered_template":rendered_template
})

except Exception:
print(test['title'])
raise

except AssertionError:
raise
# # If the assertions fail, then print the expectation to allow debugging.
# # Do NOT trap other errors, so that developers can debug using the full traceback.
# print(fake_expectation)
# all_true = False
# failure_count += 1

# except Exception as e:
# print(fake_expectation)
# raise(e)

# total_count += 1

# print(len(types))
# print(len(set(types)))
# print(total_count-failure_count, "of", total_count,
# "succeeded (", 1-failure_count*1./total_count, ")")
# Attempt to render it
render_result = PrescriptiveBulletListContentBlockRenderer.render(
[fake_expectation])

assert isinstance(render_result, dict)
assert "content_block_type" in render_result
assert render_result["content_block_type"] in render_result
assert isinstance(render_result[render_result["content_block_type"]], list )

# TODO: Assert that the template is renderable, with all the right arguments, etc.
# rendered_template = pTemplate(el["template"]).substitute(el["params"])

test_results[test_definitions["expectation_type"]].append({
test["title"]:render_result,
# "rendered_template":rendered_template
})

# TODO: accommodate case where multiple datasets exist within one expectation test definition
with open('./tests/render/output/test_render_bullet_list_content_block.json', 'w') as f:
json.dump(test_results, f, indent=2)

# assert all_true
json.dump(test_results, f, indent=2)
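
Note on the assertions kept in this commit: they only check the shape of the rendered result, not its content. A minimal sketch of a value that would satisfy them, assuming a hypothetical "bullet_list" content block type and illustrative template/params values that are not taken from this diff:

# Illustrative only: the content_block_type value and the element fields are assumptions.
render_result = {
    "content_block_type": "bullet_list",
    "bullet_list": [
        {"template": "$column may not be null", "params": {"column": "user_id"}},
    ],
}

assert isinstance(render_result, dict)
assert "content_block_type" in render_result
assert render_result["content_block_type"] in render_result
assert isinstance(render_result[render_result["content_block_type"]], list)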

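The remaining TODO about template renderability could, in principle, be checked by substituting each element's params into its template. A hedged sketch, assuming pTemplate behaves like Python's string.Template and that each rendered element is a dict with "template" and "params" keys (both assumptions, not confirmed by this diff):

from string import Template

def assert_templates_renderable(elements):
    # Assumption: each element carries "template" and "params" keys.
    for el in elements:
        rendered = Template(el["template"]).substitute(el["params"])
        assert isinstance(rendered, str)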