Skip to content

Commit

Permalink
Merge pull request #14 from schireson/dc/prettify_error_output
Browse files Browse the repository at this point in the history
feat: Prettify error output.
  • Loading branch information
DanCardin committed Jun 26, 2020
2 parents 132732d + d9bcfcc commit cc06133
Show file tree
Hide file tree
Showing 6 changed files with 103 additions and 20 deletions.
8 changes: 7 additions & 1 deletion CHANGELOG.md
@@ -1,6 +1,12 @@
# Changelog

## Unreleased (2020-06-25)
## v0.2.3 (2020-06-26)

#### New Features

* Reduce the multiple pages of traceback output to a few lines of context that are actually meaningful to a failed test.

## v0.2.2 (2020-06-25)

#### New Features

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pytest-alembic"
version = "0.2.2"
version = "0.2.3"
description = "A pytest plugin for verifying alembic migrations."
authors = [
"Dan Cardin <ddcardin@gmail.com>",
Expand Down
8 changes: 7 additions & 1 deletion src/pytest_alembic/executor.py
@@ -1,4 +1,6 @@
import contextlib
import functools
import io
from dataclasses import dataclass
from io import StringIO
from typing import Dict, List, Optional, Union
Expand Down Expand Up @@ -46,7 +48,11 @@ def run_command(self, command, *args, **kwargs):

executable_command = getattr(alembic.command, command)
try:
executable_command(self.alembic_config, *args, **kwargs)
# Hide the (relatively) worthless logs of the upgrade revision path, it just clogs
# up the logs when errors actually occur, but without providing any context.
buffer = io.StringIO()
with contextlib.redirect_stderr(buffer):
executable_command(self.alembic_config, *args, **kwargs)
except alembic.util.exc.CommandError as e:
raise RuntimeError(e)

Expand Down
34 changes: 34 additions & 0 deletions src/pytest_alembic/plugin/error.py
@@ -0,0 +1,34 @@
import textwrap

from _pytest._code.code import FormattedExcinfo


class AlembicTestFailure(AssertionError):
    """Test failure enriched with optional (title, detail) context pairs.

    ``context``, when given, is a list of ``(title, detail)`` tuples rendered
    by :class:`AlembicReprError` ahead of the exception summary.
    """

    def __init__(self, message, context=None):
        # Record the extra context before delegating the message itself
        # to the normal AssertionError machinery.
        self.context = context
        super().__init__(message)


class AlembicReprError:
    """Pytest-facing failure representation for :class:`AlembicTestFailure`.

    Replaces pytest's default multi-page traceback with each ``(title,
    detail)`` pair from the failure's ``context``, followed by a short
    exception summary.
    """

    def __init__(self, exce, item):
        # `exce` is the pytest ExceptionInfo wrapping the AlembicTestFailure;
        # `item` is the test item being reported.
        self.exce = exce
        self.item = item

    def toterminal(self, tw):
        """Print out a custom error message to the terminal.

        :param tw: pytest terminal writer, used for colored/bold output.
        """
        exc = self.exce.value
        context = exc.context

        if context:
            for title, item in context:
                tw.line(title + ":", white=True, bold=True)
                # Coerce to str: context details are not guaranteed to be
                # strings (callers pass e.g. alembic revision objects), and
                # textwrap.indent requires a string.
                tw.line(textwrap.indent(str(item), " "), red=True)
                tw.line("")

        # Only the exception summary lines, not the full traceback.
        e = FormattedExcinfo()
        lines = e.get_exconly(self.exce)

        tw.line("Errors:", white=True, bold=True)
        for line in lines:
            tw.line(line, red=True, bold=True)
7 changes: 7 additions & 0 deletions src/pytest_alembic/plugin/plugin.py
Expand Up @@ -3,6 +3,8 @@
import pytest
from _pytest import fixtures

from pytest_alembic.plugin.error import AlembicReprError, AlembicTestFailure


def collect_all_tests():
from pytest_alembic import tests
Expand Down Expand Up @@ -101,3 +103,8 @@ def runtest(self):

def reportinfo(self):
return (self.fspath, 0, f"[pytest-alembic] {self.name}")

def repr_failure(self, excinfo):
    """Render our own failures with the prettified alembic report.

    Any other exception falls through to pytest's default rendering.
    """
    if not isinstance(excinfo.value, AlembicTestFailure):
        return super().repr_failure(excinfo)
    return AlembicReprError(excinfo, self)
64 changes: 47 additions & 17 deletions src/pytest_alembic/tests.py
Expand Up @@ -4,6 +4,8 @@
from alembic.autogenerate.api import AutogenContext
from alembic.autogenerate.render import _render_cmd_body

from pytest_alembic.plugin.error import AlembicTestFailure

log = logging.getLogger(__name__)


Expand All @@ -15,16 +17,28 @@ def test_single_head_revision(alembic_runner):
have only seen it be the result of uncaught merge conflicts resulting in a diverged history,
which lazily breaks during deployment.
"""
head_count = alembic_runner.heads
heads = alembic_runner.heads
head_count = len(heads)

assert len(head_count) <= 1 # nosec
if head_count != 1:
heads = "\n".join([h.strip() for h in heads])
raise AlembicTestFailure(
"Expected 1 head revision, found {}".format(head_count), context=[("Heads", heads)],
)


@pytest.mark.alembic
def test_upgrade(alembic_runner):
"""Assert that the revision history can be run through from base to head.
"""
alembic_runner.migrate_up_to("head")
try:
alembic_runner.migrate_up_to("head")
except RuntimeError as e:
raise AlembicTestFailure(
"Failed to upgrade to the head revision. This means the historical chain from an "
"empty database, to the current revision is not possible.",
context=[("Alembic Error", str(e))],
)


@pytest.mark.alembic
Expand All @@ -45,17 +59,21 @@ def verify_is_empty_revision(migration_context, __, directives):
autogen_context = AutogenContext(migration_context)
rendered_upgrade = _render_cmd_body(script.upgrade_ops, autogen_context)

assert migration_is_empty, ( # nosec
"The models decribing the DDL of your database are out of sync with the set of "
"steps described in the revision history. This usually means that someone has "
"made manual changes to the database's DDL, or some model has been changed "
"without also generating a migration to describe that change.\n\n"
"The upgrade which would have been generated would look like:\n\n{}".format(
rendered_upgrade
if not migration_is_empty:
raise AlembicTestFailure(
"The models describing the DDL of your database are out of sync with the set of "
"steps described in the revision history. This usually means that someone has "
"made manual changes to the database's DDL, or some model has been changed "
"without also generating a migration to describe that change.",
context=[
(
"The upgrade which would have been generated would look like",
rendered_upgrade,
)
],
)
)

alembic_runner.migrate_up_to("head")
test_upgrade(alembic_runner)
alembic_runner.generate_revision(
message="test revision",
autogenerate=True,
Expand All @@ -73,8 +91,20 @@ def test_up_down_consistency(alembic_runner):
Individually upgrade to ensure that it's clear which revision caused the failure.
"""
for revision in alembic_runner.history.revisions:
alembic_runner.migrate_up_to(revision)

for revision in reversed(alembic_runner.history.revisions):
alembic_runner.migrate_down_to(revision)
try:
for revision in alembic_runner.history.revisions:
alembic_runner.migrate_up_to(revision)
except RuntimeError as e:
raise AlembicTestFailure(
"Failed to upgrade through each revision individually.",
context=[("Failing Revision", revision), ("Alembic Error", str(e))],
)

try:
for revision in reversed(alembic_runner.history.revisions):
alembic_runner.migrate_down_to(revision)
except RuntimeError as e:
raise AlembicTestFailure(
"Failed to downgrade through each revision individually.",
context=[("Failing Revision", revision), ("Alembic Error", str(e))],
)

0 comments on commit cc06133

Please sign in to comment.