diff --git a/great_expectations/cli/datasource.py b/great_expectations/cli/datasource.py
index 18f71afb88bc..f81226a25e40 100644
--- a/great_expectations/cli/datasource.py
+++ b/great_expectations/cli/datasource.py
@@ -882,7 +882,7 @@ def create_expectation_suite(
     )
 
     if profiling_results['success']:
-        build_docs(context, view=open_docs)
+        build_docs(context, view=False)
         if open_docs:  # This is mostly to keep tests from spawning windows
             try:
                 # TODO this is really brittle and not covered in tests
diff --git a/great_expectations/data_asset/data_asset.py b/great_expectations/data_asset/data_asset.py
index 0efa9c7d0937..b2a6174c815d 100644
--- a/great_expectations/data_asset/data_asset.py
+++ b/great_expectations/data_asset/data_asset.py
@@ -230,7 +230,7 @@ def wrapper(self, *args, **kwargs):
                 if catch_exceptions:
                     raised_exception = True
                     exception_traceback = traceback.format_exc()
-                    exception_message = str(err)
+                    exception_message = "{}: {}".format(type(err).__name__, str(err))
 
                     return_obj = ExpectationValidationResult(success=False)
 
@@ -837,12 +837,12 @@ def save_expectation_suite(
             raise ValueError("Unable to save config: filepath or data_context must be available.")
 
     def validate(self,
-                 expectation_suite=None, 
+                 expectation_suite=None,
                  run_id=None,
                  data_context=None,
                  evaluation_parameters=None,
-                 catch_exceptions=True, 
-                 result_format=None, 
+                 catch_exceptions=True,
+                 result_format=None,
                  only_return_failures=False):
         """Generates a JSON-formatted report describing the outcome of all expectations.
 
diff --git a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
index 5f5e1fee2f9e..3137496f4bb7 100644
--- a/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
+++ b/great_expectations/render/renderer/content_block/validation_results_table_content_block.py
@@ -10,7 +10,7 @@
     RenderedContentBlockContainer,
     RenderedStringTemplateContent,
     RenderedTableContent,
-)
+    CollapseContent)
 from great_expectations.render.util import num_to_str
 
 logger = logging.getLogger(__name__)
@@ -152,12 +152,12 @@ def _get_unexpected_statement(cls, evr):
         result = evr.result
 
         if evr.exception_info["raised_exception"]:
-            template_str = "\n\n$expectation_type raised an exception:\n$exception_message"
+            exception_message_template_str = "\n\n$expectation_type raised an exception:\n$exception_message"
 
-            return RenderedStringTemplateContent(**{
+            exception_message = RenderedStringTemplateContent(**{
                 "content_block_type": "string_template",
                 "string_template": {
-                    "template": template_str,
+                    "template": exception_message_template_str,
                     "params": {
                         "expectation_type": evr.expectation_config.expectation_type,
                         "exception_message": evr.exception_info["exception_message"]
@@ -177,8 +177,23 @@ def _get_unexpected_statement(cls, evr):
                 },
             })
 
+            exception_traceback_collapse = CollapseContent(**{
+                "collapse_toggle_link": "Show exception traceback...",
+                "collapse": [
+                    RenderedStringTemplateContent(**{
+                        "content_block_type": "string_template",
+                        "string_template": {
+                            "template": evr.exception_info["exception_traceback"],
+                            "tag": "code"
+                        }
+                    })
+                ]
+            })
+
+            return [exception_message, exception_traceback_collapse]
+
         if success or not result.get("unexpected_count"):
-            return None
+            return []
         else:
             unexpected_count = num_to_str(result["unexpected_count"], use_locale=True, precision=20)
             unexpected_percent = num_to_str(result["unexpected_percent"], precision=4) + "%"
@@ -187,7 +202,8 @@ def _get_unexpected_statement(cls, evr):
             template_str = "\n\n$unexpected_count unexpected values found. " \
                            "$unexpected_percent of $element_count total rows."
 
-            return RenderedStringTemplateContent(**{
+            return [
+                RenderedStringTemplateContent(**{
                     "content_block_type": "string_template",
                     "string_template": {
                         "template": template_str,
@@ -200,8 +216,8 @@ def _get_unexpected_statement(cls, evr):
                         "styling": {
                             "classes": ["text-danger"]
                         }
-                }
-            })
+                    }})
+            ]
 
     @classmethod
     def _get_kl_divergence_observed_value(cls, evr):
@@ -239,11 +255,11 @@ def _get_kl_divergence_observed_value(cls, evr):
 
     @classmethod
     def _get_quantile_values_observed_value(cls, evr):
-        if evr.result is None:
+        if evr.result is None or evr.result.get("observed_value") is None:
             return "--"
 
-        quantiles = evr.result["observed_value"]["quantiles"]
-        value_ranges = evr.result["observed_value"]["values"]
+        quantiles = evr.result.get("observed_value", {}).get("quantiles", [])
+        value_ranges = evr.result.get("observed_value", {}).get("values", [])
 
         table_header_row = ["Quantile", "Value"]
         table_rows = []
@@ -333,7 +349,7 @@ def row_generator_fn(evr, styling=None, include_column_name=True):
             expectation_string_cell = expectation_string_fn(expectation, styling, include_column_name)
 
             status_cell = [cls._get_status_icon(evr)]
-            unexpected_statement = None
+            unexpected_statement = []
             unexpected_table = None
             observed_value = ["--"]
 
@@ -351,8 +367,9 @@ def row_generator_fn(evr, styling=None, include_column_name=True):
                 logger.error("Exception occurred during data docs rendering: ", e, exc_info=True)
 
             # If the expectation has some unexpected values...:
-            if unexpected_statement or unexpected_table:
-                expectation_string_cell.append(unexpected_statement)
+            if unexpected_statement:
+                expectation_string_cell += unexpected_statement
+            if unexpected_table:
                 expectation_string_cell.append(unexpected_table)
 
             if len(expectation_string_cell) > 1:
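The `exception_message` change in `data_asset.py` means rendered validation results now lead with the exception class name rather than the bare message. A minimal, self-contained sketch of the new format (the failing call below is invented purely for illustration):

```python
# Sketch of the new exception_message format: the exception class name is
# prepended so Data Docs can show "ValueError: ..." instead of just the message.
try:
    int("not a number")
except Exception as err:
    exception_message = "{}: {}".format(type(err).__name__, str(err))
    print(exception_message)
    # -> ValueError: invalid literal for int() with base 10: 'not a number'
```

Relatedly, `_get_unexpected_statement` now always returns a list (empty rather than `None` when there is nothing to report), which is why `row_generator_fn` switches from `append` to `+=` when extending `expectation_string_cell`.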
diff --git a/great_expectations/render/renderer/page_renderer.py b/great_expectations/render/renderer/page_renderer.py
index f110e1171467..7ce7153261db 100644
--- a/great_expectations/render/renderer/page_renderer.py
+++ b/great_expectations/render/renderer/page_renderer.py
@@ -43,6 +43,7 @@ def render(self, validation_results):
         run_id = validation_results.meta['run_id']
         batch_id = BatchKwargs(validation_results.meta['batch_kwargs']).to_id()
         expectation_suite_name = validation_results.meta['expectation_suite_name']
+        batch_kwargs = validation_results.meta.get("batch_kwargs")
 
         # Group EVRs by column
         columns = {}
@@ -125,6 +126,7 @@ def render(self, validation_results):
         return RenderedDocumentContent(**{
             "renderer_type": "ValidationResultsPageRenderer",
             "page_title": expectation_suite_name + " / " + run_id + " / " + batch_id,
+            "batch_kwargs": batch_kwargs,
             "expectation_suite_name": expectation_suite_name,
             "sections": sections,
             "utm_medium": "validation-results-page",
@@ -613,6 +615,7 @@ def __init__(self, overview_section_renderer=None, column_section_renderer=None)
     def render(self, validation_results):
         run_id = validation_results.meta['run_id']
         expectation_suite_name = validation_results.meta['expectation_suite_name']
+        batch_kwargs = validation_results.meta.get("batch_kwargs")
 
         # Group EVRs by column
         #TODO: When we implement a ValidationResultSuite class, this method will move there.
@@ -626,6 +629,7 @@ def render(self, validation_results):
             "page_title": run_id + "-" + expectation_suite_name + "-ProfilingResults",
             "expectation_suite_name": expectation_suite_name,
             "utm_medium": "profiling-results-page",
+            "batch_kwargs": batch_kwargs,
             "sections": [
                 self._overview_section_renderer.render(
diff --git a/great_expectations/render/types/__init__.py b/great_expectations/render/types/__init__.py
index 2e9bf842c1a7..eef285eb889f 100644
--- a/great_expectations/render/types/__init__.py
+++ b/great_expectations/render/types/__init__.py
@@ -250,7 +250,7 @@ def to_json_dict(self):
 class RenderedDocumentContent(RenderedContent):
     # NOTE: JPC 20191028 - review these keys to consolidate and group
     def __init__(self, sections, data_asset_name=None, full_data_asset_identifier=None, renderer_type=None,
-                 page_title=None, utm_medium=None, cta_footer=None, expectation_suite_name=None):
+                 page_title=None, utm_medium=None, cta_footer=None, expectation_suite_name=None, batch_kwargs=None):
         if not isinstance(sections, list) and all([isinstance(section, RenderedSectionContent) for section in
                                                    sections]):
             raise InvalidRenderedContentError("RenderedDocumentContent requires a list of RenderedSectionContent for "
@@ -263,6 +263,7 @@ def __init__(self, sections, data_asset_name=None, full_data_asset_identifier=No
         self.utm_medium = utm_medium
         self.cta_footer = cta_footer
         self.expectation_suite_name = expectation_suite_name
+        self.batch_kwargs = batch_kwargs
 
     def to_json_dict(self):
         d = super(RenderedDocumentContent, self).to_json_dict()
@@ -274,6 +275,7 @@ def to_json_dict(self):
         d["utm_medium"] = self.utm_medium
         d["cta_footer"] = self.cta_footer
         d["expectation_suite_name"] = self.expectation_suite_name
+        d["batch_kwargs"] = self.batch_kwargs
         return d
 
 
diff --git a/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2 b/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2
index a4c5493746aa..3d721a55c193 100644
--- a/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2
+++ b/great_expectations/render/view/templates/edit_expectations_instructions_modal.j2
@@ -2,6 +2,13 @@
   {% set expectation_suite_name_dot_count = expectation_suite_name.count(".") -%}
 {% endif %}
 
+{% if batch_kwargs %}
+  {% set batch_kwargs = batch_kwargs | get_html_escaped_json_string_from_dict %}
+  {% set edit_suite_command = "great_expectations suite edit " + expectation_suite_name + " --batch_kwargs " + "\"" + batch_kwargs + "\"" %}
+{% else %}
+  {% set edit_suite_command = "great_expectations suite edit " + expectation_suite_name %}
+{% endif %}
+
 {% if utm_medium == "validation-results-page" or utm_medium == "profiling-results-page" %}
   {% set static_images_dir = ((expectation_suite_name_dot_count + 3) * "../") + "static/images/" -%}
 {% elif utm_medium == "expectation-suite-page" %}
@@ -34,7 +41,11 @@ $(function() {
       Expectations are best edited interactively in Jupyter notebooks.
       To automatically generate a notebook that does this run:
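The template pipes `batch_kwargs` through a `get_html_escaped_json_string_from_dict` Jinja filter that is not part of this diff. A minimal sketch of what such a filter might do, assuming it serializes the dict to JSON and HTML-escapes the quotes (an assumption for illustration, not the PR's actual implementation):

```python
import json

# Hypothetical stand-in for the get_html_escaped_json_string_from_dict Jinja
# filter referenced in the template; the real implementation lives outside
# this diff. It serializes batch_kwargs to JSON and escapes the double quotes
# so the string can be embedded safely in the modal's HTML.
def get_html_escaped_json_string_from_dict(source_dict):
    return json.dumps(source_dict).replace('"', "&quot;")

print(get_html_escaped_json_string_from_dict({"path": "data/Titanic.csv"}))
# -> {&quot;path&quot;: &quot;data/Titanic.csv&quot;}
```

With `batch_kwargs` threaded through the renderers into the page, the modal can surface a copy-pastable command of the form `great_expectations suite edit <suite_name> --batch_kwargs "<json>"` instead of the bare `great_expectations suite edit <suite_name>`.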