diff --git a/great_expectations/cli/datasource.py b/great_expectations/cli/datasource.py
index 18f71afb88bc..f81226a25e40 100644
--- a/great_expectations/cli/datasource.py
+++ b/great_expectations/cli/datasource.py
@@ -882,7 +882,7 @@ def create_expectation_suite(
     )

     if profiling_results['success']:
-        build_docs(context, view=open_docs)
+        build_docs(context, view=False)
         if open_docs:  # This is mostly to keep tests from spawning windows
             try:
                 # TODO this is really brittle and not covered in tests
diff --git a/great_expectations/data_asset/data_asset.py b/great_expectations/data_asset/data_asset.py
index 0efa9c7d0937..b2a6174c815d 100644
--- a/great_expectations/data_asset/data_asset.py
+++ b/great_expectations/data_asset/data_asset.py
@@ -230,7 +230,7 @@ def wrapper(self, *args, **kwargs):
                 if catch_exceptions:
                     raised_exception = True
                     exception_traceback = traceback.format_exc()
-                    exception_message = str(err)
+                    exception_message = "{}: {}".format(type(err).__name__, str(err))

                     return_obj = ExpectationValidationResult(success=False)
@@ -837,12 +837,12 @@ def save_expectation_suite(
             raise ValueError("Unable to save config: filepath or data_context must be available.")

     def validate(self,
-                 expectation_suite=None,
+                 expectation_suite=None,
                  run_id=None,
                  data_context=None,
                  evaluation_parameters=None,
-                 catch_exceptions=True,
-                 result_format=None,
+                 catch_exceptions=True,
+                 result_format=None,
                  only_return_failures=False):
         """Generates a JSON-formatted report describing the outcome of all expectations.
diff --git a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
index 5f5e1fee2f9e..3137496f4bb7 100644
--- a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
+++ b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
@@ -10,7 +10,7 @@
     RenderedContentBlockContainer,
     RenderedStringTemplateContent,
     RenderedTableContent,
-)
+    CollapseContent)
 from great_expectations.render.util import num_to_str

 logger = logging.getLogger(__name__)
@@ -152,12 +152,12 @@ def _get_unexpected_statement(cls, evr):
         result = evr.result

         if evr.exception_info["raised_exception"]:
-            template_str = "\n\n$expectation_type raised an exception:\n$exception_message"
+            exception_message_template_str = "\n\n$expectation_type raised an exception:\n$exception_message"

-            return RenderedStringTemplateContent(**{
+            exception_message = RenderedStringTemplateContent(**{
                 "content_block_type": "string_template",
                 "string_template": {
-                    "template": template_str,
+                    "template": exception_message_template_str,
                     "params": {
                         "expectation_type": evr.expectation_config.expectation_type,
                         "exception_message": evr.exception_info["exception_message"]
@@ -177,8 +177,23 @@ def _get_unexpected_statement(cls, evr):
                 },
             })

+            exception_traceback_collapse = CollapseContent(**{
+                "collapse_toggle_link": "Show exception traceback...",
+                "collapse": [
+                    RenderedStringTemplateContent(**{
+                        "content_block_type": "string_template",
+                        "string_template": {
+                            "template": evr.exception_info["exception_traceback"],
+                            "tag": "code"
+                        }
+                    })
+                ]
+            })
+
+            return [exception_message, exception_traceback_collapse]
+
         if success or not result.get("unexpected_count"):
-            return None
+            return []
         else:
             unexpected_count = num_to_str(result["unexpected_count"], use_locale=True, precision=20)
             unexpected_percent = num_to_str(result["unexpected_percent"], precision=4) + "%"
@@ -187,7 +202,8 @@ def _get_unexpected_statement(cls, evr):
             template_str = "\n\n$unexpected_count unexpected values found. " \
                            "$unexpected_percent of $element_count total rows."

-            return RenderedStringTemplateContent(**{
+            return [
+                RenderedStringTemplateContent(**{
                     "content_block_type": "string_template",
                     "string_template": {
                         "template": template_str,
@@ -200,8 +216,8 @@ def _get_unexpected_statement(cls, evr):
                     "styling": {
                         "classes": ["text-danger"]
                     }
-                }
-            })
+                }})
+            ]

     @classmethod
     def _get_kl_divergence_observed_value(cls, evr):
@@ -239,11 +255,11 @@ def _get_kl_divergence_observed_value(cls, evr):

     @classmethod
     def _get_quantile_values_observed_value(cls, evr):
-        if evr.result is None:
+        if evr.result is None or evr.result.get("observed_value") is None:
             return "--"

-        quantiles = evr.result["observed_value"]["quantiles"]
-        value_ranges = evr.result["observed_value"]["values"]
+        quantiles = evr.result.get("observed_value", {}).get("quantiles", [])
+        value_ranges = evr.result.get("observed_value", {}).get("values", [])

         table_header_row = ["Quantile", "Value"]
         table_rows = []
@@ -333,7 +349,7 @@ def row_generator_fn(evr, styling=None, include_column_name=True):
             expectation_string_cell = expectation_string_fn(expectation, styling, include_column_name)

             status_cell = [cls._get_status_icon(evr)]
-            unexpected_statement = None
+            unexpected_statement = []
             unexpected_table = None
             observed_value = ["--"]
@@ -351,8 +367,9 @@ def row_generator_fn(evr, styling=None, include_column_name=True):
                 logger.error("Exception occurred during data docs rendering: ", e, exc_info=True)

             # If the expectation has some unexpected values...:
-            if unexpected_statement or unexpected_table:
-                expectation_string_cell.append(unexpected_statement)
+            if unexpected_statement:
+                expectation_string_cell += unexpected_statement
+            if unexpected_table:
                 expectation_string_cell.append(unexpected_table)

             if len(expectation_string_cell) > 1:
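A note on the `_get_unexpected_statement` refactor above: the old single-block-or-`None` return forced the caller to share one condition between the statement and the unexpected-values table, so a row with only a table would `append(None)` into the cell. Returning a list (empty, or the exception message plus its collapsible traceback) makes the splice unconditional. A minimal sketch of the calling-convention change; the variable names mirror `row_generator_fn` in the diff, and the string literals are placeholders:

```python
unexpected_table = None

# Before: one block or None, and a shared condition that could
# append None to the cell when only unexpected_table was set.
unexpected_statement = None
cell = ["expectation text"]
if unexpected_statement or unexpected_table:
    cell.append(unexpected_statement)

# After: always a list (zero, one, or two blocks), so += is safe
# and the table gets its own guard.
unexpected_statement = []
cell = ["expectation text"]
cell += unexpected_statement
if unexpected_table:
    cell.append(unexpected_table)
```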
diff --git a/great_expectations/render/renderer/page_renderer.py b/great_expectations/render/renderer/page_renderer.py
index f110e1171467..7ce7153261db 100644
--- a/great_expectations/render/renderer/page_renderer.py
+++ b/great_expectations/render/renderer/page_renderer.py
@@ -43,6 +43,7 @@ def render(self, validation_results):
         run_id = validation_results.meta['run_id']
         batch_id = BatchKwargs(validation_results.meta['batch_kwargs']).to_id()
         expectation_suite_name = validation_results.meta['expectation_suite_name']
+        batch_kwargs = validation_results.meta.get("batch_kwargs")

         # Group EVRs by column
         columns = {}
@@ -125,6 +126,7 @@ def render(self, validation_results):
         return RenderedDocumentContent(**{
             "renderer_type": "ValidationResultsPageRenderer",
             "page_title": expectation_suite_name + " / " + run_id + " / " + batch_id,
+            "batch_kwargs": batch_kwargs,
             "expectation_suite_name": expectation_suite_name,
             "sections": sections,
             "utm_medium": "validation-results-page",
@@ -613,6 +615,7 @@ def __init__(self, overview_section_renderer=None, column_section_renderer=None)
     def render(self, validation_results):
         run_id = validation_results.meta['run_id']
         expectation_suite_name = validation_results.meta['expectation_suite_name']
+        batch_kwargs = validation_results.meta.get("batch_kwargs")

         # Group EVRs by column
         # TODO: When we implement a ValidationResultSuite class, this method will move there.
@@ -626,6 +629,7 @@ def render(self, validation_results):
             "page_title": run_id + "-" + expectation_suite_name + "-ProfilingResults",
             "expectation_suite_name": expectation_suite_name,
             "utm_medium": "profiling-results-page",
+            "batch_kwargs": batch_kwargs,
             "sections": [
                 self._overview_section_renderer.render(
diff --git a/great_expectations/render/types/__init__.py b/great_expectations/render/types/__init__.py
index 2e9bf842c1a7..eef285eb889f 100644
--- a/great_expectations/render/types/__init__.py
+++ b/great_expectations/render/types/__init__.py
@@ -250,7 +250,7 @@ def to_json_dict(self):
 class RenderedDocumentContent(RenderedContent):
     # NOTE: JPC 20191028 - review these keys to consolidate and group
     def __init__(self, sections, data_asset_name=None, full_data_asset_identifier=None, renderer_type=None,
-                 page_title=None, utm_medium=None, cta_footer=None, expectation_suite_name=None):
+                 page_title=None, utm_medium=None, cta_footer=None, expectation_suite_name=None, batch_kwargs=None):
         if not isinstance(sections, list) and all([isinstance(section, RenderedSectionContent) for section in
                                                    sections]):
             raise InvalidRenderedContentError("RenderedDocumentContent requires a list of RenderedSectionContent for "
@@ -263,6 +263,7 @@ def __init__(self, sections, data_asset_name=None, full_data_asset_identifier=No
         self.utm_medium = utm_medium
         self.cta_footer = cta_footer
         self.expectation_suite_name = expectation_suite_name
+        self.batch_kwargs = batch_kwargs

     def to_json_dict(self):
         d = super(RenderedDocumentContent, self).to_json_dict()
@@ -274,6 +275,7 @@ def to_json_dict(self):
         d["utm_medium"] = self.utm_medium
         d["cta_footer"] = self.cta_footer
         d["expectation_suite_name"] = self.expectation_suite_name
+        d["batch_kwargs"] = self.batch_kwargs
         return d

diff --git a/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2 b/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2
index a4c5493746aa..3d721a55c193 100644
--- a/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2
+++ b/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2
@@ -2,6 +2,13 @@
   {% set expectation_suite_name_dot_count = expectation_suite_name.count(".") -%}
 {% endif %}

+{% if batch_kwargs %}
+  {% set batch_kwargs = batch_kwargs | get_html_escaped_json_string_from_dict %}
+  {% set edit_suite_command = "great_expectations suite edit " + expectation_suite_name + " --batch_kwargs " + "&quot;" + batch_kwargs + "&quot;" %}
+{% else %}
+  {% set edit_suite_command = "great_expectations suite edit " + expectation_suite_name %}
+{% endif %}
+
 {% if utm_medium == "validation-results-page" or utm_medium == "profiling-results-page" %}
   {% set static_images_dir = ((expectation_suite_name_dot_count + 3) * "../") + "static/images/" -%}
 {% elif utm_medium == "expectation-suite-page" %}
@@ -34,7 +41,11 @@ $(function() {

   Expectations are best edited interactively in Jupyter notebooks.

   To automatically generate a notebook that does this run:

-  <code>great_expectations suite edit {{ expectation_suite_name }}</code>
+  {% if batch_kwargs %}
+    <code>{{ edit_suite_command }}</code>
+  {% else %}
+    <code>{{ edit_suite_command }}</code>
+  {% endif %}
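To make the new modal logic concrete: with a hypothetical suite named `my_suite` and batch_kwargs of `{"path": "data/Titanic.csv"}`, the template assembles a copy-pasteable edit command. The same string construction in plain Python (the names mirror the template variables; the `&quot;` entity step is left out because it only matters once the string is embedded in HTML):

```python
import json

# Hypothetical inputs standing in for what the modal template receives.
expectation_suite_name = "my_suite"
batch_kwargs = {"path": "data/Titanic.csv"}

# Backslash-escape the JSON's quotes so the command survives shell quoting.
escaped_batch_kwargs = json.dumps(batch_kwargs).replace('"', '\\"')
edit_suite_command = (
    "great_expectations suite edit " + expectation_suite_name
    + ' --batch_kwargs "' + escaped_batch_kwargs + '"'
)
print(edit_suite_command)
# great_expectations suite edit my_suite --batch_kwargs "{\"path\": \"data/Titanic.csv\"}"
```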
diff --git a/great_expectations/render/view/view.py b/great_expectations/render/view/view.py
index 089d8e90cf78..d5d9f531c26f 100644
--- a/great_expectations/render/view/view.py
+++ b/great_expectations/render/view/view.py
@@ -100,6 +100,7 @@ def _get_template(self, template):
         env.filters['render_styling'] = self.render_styling
         env.filters['render_content_block'] = self.render_content_block
         env.filters['render_markdown'] = self.render_markdown
+        env.filters['get_html_escaped_json_string_from_dict'] = self.get_html_escaped_json_string_from_dict
         env.filters['generate_html_element_uuid'] = self.generate_html_element_uuid
         env.globals['ge_version'] = ge_version
@@ -133,6 +134,9 @@ def render_content_block(self, context, content_block, index=None, content_block
         else:
             return template.render(context, content_block=content_block, index=index)

+    def get_html_escaped_json_string_from_dict(self, source_dict):
+        return json.dumps(source_dict).replace('"', '\\"').replace('"', '&quot;')
+
     def render_styling(self, styling):
         """Adds styling information suitable for an html tag.
diff --git a/tests/render/test_render_ValidationResultsTableContentBlockRenderer.py b/tests/render/test_render_ValidationResultsTableContentBlockRenderer.py
index 10715f11c224..4f70e7a66647 100644
--- a/tests/render/test_render_ValidationResultsTableContentBlockRenderer.py
+++ b/tests/render/test_render_ValidationResultsTableContentBlockRenderer.py
@@ -15,24 +15,63 @@ def test_ValidationResultsTableContentBlockRenderer_generate_expectation_row_wit
     result = ValidationResultsTableContentBlockRenderer.render([evr_failed_with_exception]).to_json_dict()
     print(result)
     expected_result = {
-        'content_block_type': 'table',
-        'styling': {'body': {'classes': ['table']}, 'classes': ['ml-2', 'mr-2', 'mt-0', 'mb-0', 'table-responsive']},
-        'table': [[{'content_block_type': 'string_template',
-                    'string_template': {'template': '$icon', 'params': {'icon': ''}, 'styling': {'params': {
-                        'icon': {'classes': ['fas', 'fa-exclamation-triangle', 'text-warning'], 'tag': 'i'}}}}}, [
-                       {'content_block_type': 'string_template',
-                        'string_template': {'template': '$column can match any distribution.',
-                                            'params': {"column": "live", "partition_object": None, "threshold": None,
-                                                       "result_format": "SUMMARY"}}},
-                       {'content_block_type': 'string_template', 'string_template': {
-                           'template': '\n\n$expectation_type raised an exception:\n$exception_message',
-                           'params': {'expectation_type': 'expect_column_kl_divergence_to_be_less_than',
-                                      'exception_message': 'Invalid partition object.'}, 'tag': 'strong',
-                           'styling': {'classes': ['text-danger'], 'params': {'exception_message': {'tag': 'code'},
-                                                                              'expectation_type': {
-                                                                                  'classes': ['badge', 'badge-danger',
-                                                                                              'mb-2']}}}}}, None],
-                   '--']], 'header_row': ['Status', 'Expectation', 'Observed Value']}
+        'content_block_type': 'table',
+        'styling': {
+            'body': {'classes': ['table']},
+            'classes': ['ml-2', 'mr-2', 'mt-0', 'mb-0', 'table-responsive']
+        },
+        'table': [[
+            {'content_block_type': 'string_template',
+             'string_template': {
+                 'template': '$icon',
+                 'params': {'icon': ''},
+                 'styling': {'params': {
+                     'icon': {'classes': ['fas', 'fa-exclamation-triangle', 'text-warning'], 'tag': 'i'}}}}},
+            [
+                {'content_block_type': 'string_template',
+                 'string_template': {
+                     'template': '$column can match any distribution.',
+                     'params': {"column": "live", "partition_object": None, "threshold": None,
+                                "result_format": "SUMMARY"}}},
+                {'content_block_type': 'string_template',
+                 'string_template': {
+                     'template': '\n\n$expectation_type raised an exception:\n$exception_message',
+                     'params': {'expectation_type': 'expect_column_kl_divergence_to_be_less_than',
+                                'exception_message': 'Invalid partition object.'},
+                     'tag': 'strong',
+                     'styling': {'classes': ['text-danger'],
+                                 'params': {'exception_message': {'tag': 'code'},
+                                            'expectation_type': {
+                                                'classes': ['badge', 'badge-danger', 'mb-2']}}}}},
+                {'content_block_type': 'collapse',
+                 'collapse_toggle_link': 'Show exception traceback...',
+                 'collapse': [
+                     {'content_block_type': 'string_template',
+                      'string_template': {
+                          'template': 'Traceback (most recent call last):\n  File "/great_expectations/great_expectations/data_asset/data_asset.py", line 216, in wrapper\n    return_obj = func(self, **evaluation_args)\n  File "/great_expectations/great_expectations/dataset/dataset.py", line 106, in inner_wrapper\n    evaluation_result = func(self, column, *args, **kwargs)\n  File "/great_expectations/great_expectations/dataset/dataset.py", line 3381, in expect_column_kl_divergence_to_be_less_than\n    raise ValueError("Invalid partition object.")\nValueError: Invalid partition object.\n',
+                          'tag': 'code'}}],
+                 'inline_link': False}
+            ],
+            '--'
+        ]],
+        'header_row': ['Status', 'Expectation', 'Observed Value']}

     assert result == expected_result
@@ -247,11 +286,11 @@ def test_ValidationResultsTableContentBlockRenderer_get_unexpected_statement(evr

     # test for succeeded evr
     output_1 = ValidationResultsTableContentBlockRenderer._get_unexpected_statement(evr_success)
-    assert output_1 is None
+    assert output_1 == []

     # test for failed evr
     output_2 = ValidationResultsTableContentBlockRenderer._get_unexpected_statement(evr_failed)
-    assert output_2 == RenderedStringTemplateContent(**{
+    assert output_2 == [RenderedStringTemplateContent(**{
         "content_block_type": "string_template",
         "string_template": {
             "template": "\n\n$unexpected_count unexpected values found. $unexpected_percent of $element_count total rows.",
@@ -267,17 +306,17 @@ def test_ValidationResultsTableContentBlockRenderer_get_unexpected_statement(evr
                 ]
             }
         }
-    })
+    })]

     # test for evr with no "result" key
     output_3 = ValidationResultsTableContentBlockRenderer._get_unexpected_statement(evr_no_result)
     print(json.dumps(output_3, indent=2))
-    assert output_3 is None
+    assert output_3 == []

     # test for evr with no unexpected count
     output_4 = ValidationResultsTableContentBlockRenderer._get_unexpected_statement(evr_failed_no_unexpected_count)
     print(output_4)
-    assert output_4 is None
+    assert output_4 == []

     # test for evr with exception
     evr_failed_exception = ExpectationValidationResult(
@@ -298,25 +337,18 @@ def test_ValidationResultsTableContentBlockRenderer_get_unexpected_statement(evr
     )

     output_5 = ValidationResultsTableContentBlockRenderer._get_unexpected_statement(evr_failed_exception)
-    assert output_5 == RenderedStringTemplateContent(**{
-        'content_block_type': 'string_template',
-        'string_template': {
-            'template': '\n\n$expectation_type raised an exception:\n$exception_message',
-            'params': {
-                'expectation_type': 'expect_column_values_to_not_match_regex',
-                'exception_message': 'Unrecognized column: not_a_real_column'},
-            'tag': 'strong',
-            'styling': {
-                'classes': ['text-danger'],
-                'params': {
-                    'exception_message': {'tag': 'code'},
-                    'expectation_type': {
-                        'classes': ['badge', 'badge-danger', 'mb-2']
-                    }
-                }
-            }
-        }
-    })
+    output_5 = [content.to_json_dict() for content in output_5]
+    expected_output_5 = [
+        {'content_block_type': 'string_template', 'string_template': {
+            'template': '\n\n$expectation_type raised an exception:\n$exception_message',
+            'params': {'expectation_type': 'expect_column_values_to_not_match_regex',
+                       'exception_message': 'Unrecognized column: not_a_real_column'}, 'tag': 'strong',
+            'styling': {'classes': ['text-danger'],
+                        'params': {'exception_message': {'tag': 'code'},
+                                   'expectation_type': {'classes': ['badge', 'badge-danger', 'mb-2']}}}}},
+        {'content_block_type': 'collapse', 'collapse_toggle_link': 'Show exception traceback...',
+         'collapse': [{'content_block_type': 'string_template', 'string_template': {
+             'template': 'Traceback (most recent call last):\n...more_traceback...', 'tag': 'code'}}],
+         'inline_link': False}]
+    assert output_5 == expected_output_5


 def test_ValidationResultsTableContentBlockRenderer_get_unexpected_table(evr_success):
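One subtlety in the new `get_html_escaped_json_string_from_dict` filter is that the two `replace` calls compose: the first prefixes every quote in the JSON with a backslash, and the second converts the quote character itself into an HTML entity, so each original `"` comes out as `\&quot;` and renders in the browser as the shell-escaped `\"`. A minimal standalone sketch of that behavior, mirroring the filter in view.py with a hypothetical input:

```python
import json

def get_html_escaped_json_string_from_dict(source_dict):
    # First backslash-escape every quote, then entity-encode the quote
    # character itself; together they yield \&quot; per original quote.
    return json.dumps(source_dict).replace('"', '\\"').replace('"', '&quot;')

print(get_html_escaped_json_string_from_dict({"path": "data/Titanic.csv"}))
# {\&quot;path\&quot;: \&quot;data/Titanic.csv\&quot;}
# ...which an HTML page displays as {\"path\": \"data/Titanic.csv\"}
```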