Allow outputting test results as Allure framework results #1115

Merged
merged 2 commits on Dec 28, 2020
8 changes: 6 additions & 2 deletions planemo/commands/cmd_test_reports.py
@@ -1,5 +1,6 @@
"""Module describing the planemo ``test_reports`` command."""
import os
import datetime
import pathlib

import click

@@ -19,10 +20,13 @@ def cli(ctx, path, **kwds):
Creates reports in various formats (HTML, text, markdown)
from the structured test output (tool_test_output.json).
"""
if not os.path.exists(path):
fname = pathlib.Path(path)
if not fname.exists():
io.error("Failed to tool test json file at %s" % path)
return 1

test_data = StructuredData(path)
test_data.calculate_summary_data_if_needed()
file_modification_datetime = datetime.datetime.fromtimestamp(fname.stat().st_mtime)
kwds["file_modification_datetime"] = file_modification_datetime
handle_reports(ctx, test_data.structured_data, kwds)
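
For context, the lines added above record the report file's modification time so the Allure writer can fall back on it when a job carries no timestamps. A minimal, standard-library-only sketch of that fallback (the helper name and file name are illustrative, not part of the change):

import datetime
import pathlib

def report_file_mtime(path):
    """Return the report file's last-modification time as a naive datetime.

    Used as a fallback start/stop timestamp when the structured test data
    has no job create_time/update_time.
    """
    return datetime.datetime.fromtimestamp(pathlib.Path(path).stat().st_mtime)

# Example (assumes the file exists):
# report_file_mtime("tool_test_output.json")
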
12 changes: 10 additions & 2 deletions planemo/galaxy/test/actions.py
@@ -18,7 +18,10 @@
setup_venv,
)
from planemo.io import error, info, shell_join, warn
from planemo.reports import build_report
from planemo.reports import (
allure,
build_report,
)
from planemo.test.results import get_dict_value
from . import structures as test_structures

@@ -169,7 +172,7 @@ def handle_reports(ctx, structured_data, kwds):
except Exception as e:
exceptions.append(e)

for report_type in ["html", "markdown", "text", "xunit", "junit"]:
for report_type in ["html", "markdown", "text", "xunit", "junit", "allure"]:
try:
_handle_test_output_file(
ctx, report_type, structured_data, kwds
@@ -192,6 +195,11 @@ def _handle_test_output_file(ctx, report_type, test_data, kwds):
ctx.vlog(message)
return

if report_type == "allure":
file_modification_datetime = kwds.get("file_modification_datetime")
allure.write_results(path, test_data, file_modification_datetime=file_modification_datetime)
return

try:
contents = build_report.build_report(
test_data, report_type=report_type
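A simplified sketch of how the new branch slots into the dispatch above, under the assumption that, as with the other report types, the destination is read from the kwds entry matching the CLI option name (here test_output_allure). Unlike the single-file reports, this branch hands a results directory to the writer; the function below is a hypothetical condensation, not planemo's actual code:

from planemo.reports import allure

def handle_allure_output(structured_data, kwds):
    """Hypothetical condensation of the "allure" branch of _handle_test_output_file."""
    results_dir = kwds.get("test_output_allure")  # assumed kwds key for --test_output_allure
    if not results_dir:
        return  # option not supplied; nothing to write
    allure.write_results(
        results_dir,  # a directory; created by AllureWriter if missing
        structured_data,
        file_modification_datetime=kwds.get("file_modification_datetime"),
    )
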
7 changes: 7 additions & 0 deletions planemo/options.py
@@ -1502,6 +1502,13 @@ def test_report_options():
help=("Output test report (jUnit style - for CI systems"),
default=None,
),
planemo_option(
"--test_output_allure",
type=click.Path(file_okay=False, resolve_path=True),
use_global_config=True,
help=("Output test allure2 framework resutls"),
default=None,
)
)


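A usage sketch for the new option (file and directory names are illustrative; the shape mirrors the invocation exercised by the new test further down). Pointing --test_output_allure at a directory makes the reports command write Allure 2 result files there, alongside any other requested report formats:

planemo test_reports --test_output_allure allure_results tool_test_output.json
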
180 changes: 180 additions & 0 deletions planemo/reports/allure.py
@@ -0,0 +1,180 @@
import json

from allure_commons import plugin_manager
from allure_commons.lifecycle import AllureLifecycle
from allure_commons.logger import AllureFileLogger
from allure_commons.model2 import (
Label,
Link,
Status,
StatusDetails,
)
from allure_commons.types import AttachmentType, LabelType, LinkType
from allure_commons.utils import (
escape_non_unicode_symbols,
md5,
platform_label,
uuid4,
)
from dateutil import parser
from galaxy.util import (
safe_makedirs,
)

JSON_INDENT = 2


class AllureListener(object):
def __init__(self, lifecycle):
self.lifecycle = lifecycle


class AllureWriter:

def __init__(self, results_path):
safe_makedirs(results_path)
self.lifecycle = AllureLifecycle()
self.logger = AllureFileLogger(results_path)
self.listener = AllureListener(self.lifecycle)

def process(self, structured_data, file_modification_datetime=None):
plugin_manager.register(self.listener)
plugin_manager.register(self.logger)

for test_case in structured_data["tests"]:
self.process_test_case(test_case, file_modification_datetime=file_modification_datetime)

plugin_manager.unregister(plugin=self.listener)
plugin_manager.unregister(plugin=self.logger)

def process_test_case(self, test_case, file_modification_datetime=None):

with self.lifecycle.schedule_test_case() as test_result:
test_index = test_case["id"]
test_data = test_case.get("data") or {}
job = test_data.get("job") or {}
test_result.name = test_index
self._record_start_stop(test_result, file_modification_datetime, job)

test_result.fullName = test_index
test_result.testCaseId = md5(test_index)
test_result.historyId = md5(test_index)
tool_id = self._record_suite_labels(test_result, test_data, job)

self._attach_data("test_data", json.dumps(test_data, indent=JSON_INDENT), attachment_type=AttachmentType.JSON)
for key in ["stderr", "stdout", "command_line", "external_id", "job_messages"]:
val = job.get(key)
if not val:
continue
if isinstance(val, list):
attachment_type = AttachmentType.JSON
# job_messages arrives as a list, so attach it as JSON
val = json.dumps(val, indent=JSON_INDENT)
else:
if not val.strip():
continue
attachment_type = AttachmentType.TEXT
self._attach_data(key, val, attachment_type=attachment_type)

problem_message = None
for key in ["execution_problem", "output_problems"]:
val = test_data.get(key)
if not val:
continue
if isinstance(val, list) and val:
# remove duplicated messages...
val = list(set(val))

attachment_type = AttachmentType.HTML
as_html_list = "<ul>"
as_html_list += "\n".join([f"<li><pre>{v}</pre></li>" for v in val])
as_html_list += "</ul>"
problem_message = val[0]
val = as_html_list
else:
if not val.strip():
continue
attachment_type = AttachmentType.TEXT
problem_message = val
self._attach_data(key, val, attachment_type=attachment_type)

if problem_message is None and "job_messages" in job:
job_messages = job.get("job_messages")
if job_messages:
problem_message = str(job_messages)

test_result.labels.append(Label(name=LabelType.FRAMEWORK, value='planemo'))
test_result.labels.append(Label(name=LabelType.LANGUAGE, value=platform_label()))

self._record_tool_link(test_result, tool_id)
self._record_status(test_result, test_data)
if test_result.status in [Status.BROKEN, Status.FAILED]:
test_result.statusDetails = StatusDetails(
message=escape_non_unicode_symbols(problem_message or "Unknown problem"),
trace=None
)

self.lifecycle.write_test_case()

def _record_start_stop(self, test_result, file_modification_datetime, job):
start_datetime = file_modification_datetime
end_datetime = file_modification_datetime
if "create_time" in job:
start_datetime = parser.parse(job["create_time"])

if "update_time" in job:
end_datetime = parser.parse(job["update_time"])

if start_datetime is not None:
test_result.start = int(round(start_datetime.timestamp() * 1000))
if end_datetime is not None:
test_result.stop = int(round(end_datetime.timestamp() * 1000))

def _record_suite_labels(self, test_result, test_data, job):
tool_id = None
if "tool_id" in test_data:
tool_id = test_data["tool_id"]
test_result.labels.append(Label(name=LabelType.PARENT_SUITE, value=tool_id))
elif "tool_id" in job:
tool_id = job["tool_id"]
test_result.labels.append(Label(name=LabelType.PARENT_SUITE, value=tool_id))

if "tool_version" in test_data:
test_result.labels.append(Label(name=LabelType.SUITE, value=test_data["tool_version"]))
elif "tool_version" in job:
test_result.labels.append(Label(name=LabelType.SUITE, value=job["tool_version"]))

if "test_index" in test_data:
test_result.labels.append(Label(name=LabelType.SUB_SUITE, value=str(test_data["test_index"])))
return tool_id

def _record_tool_link(self, test_result, tool_id):
if tool_id and 'repos' in tool_id:
tool_parts = tool_id.split("/")
if len(tool_parts) >= 4:
link = Link(LinkType.LINK, "https://%s" % "/".join(tool_parts[0:4]), "Tool Repository")
test_result.links.append(link)

def _record_status(self, test_result, test_data):
status = test_data.get("status", "error")
if status == "success":
test_result.status = Status.PASSED
elif status == "failure":
test_result.status = Status.FAILED
elif status == "skip":
test_result.status = Status.SKIPPED
else:
test_result.status = Status.BROKEN

def _attach_data(self, key, val, attachment_type=AttachmentType.TEXT):
self.lifecycle.attach_data(
uuid4(),
val,
name=key,
attachment_type=attachment_type,
extension=None
)


def write_results(results_path, structured_data, **kwds):
AllureWriter(results_path).process(structured_data, file_modification_datetime=kwds.get("file_modification_datetime"))
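
A hedged, programmatic usage sketch of the new module (it assumes StructuredData is importable from planemo.test.results, the module get_dict_value is imported from above; file and directory names are illustrative):

from planemo.reports import allure
from planemo.test.results import StructuredData  # assumed import path

# Load the structured tool test output and fill in summary fields.
test_data = StructuredData("tool_test_output.json")
test_data.calculate_summary_data_if_needed()

# Write one Allure result file per test case into the directory.
allure.write_results("allure_results", test_data.structured_data)

If the Allure 2 command-line tool is installed, the directory can then be rendered with, for example, allure serve allure_results.
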
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,3 +1,4 @@
allure-python-commons
BeautifulSoup4
bioblend>=0.14.0
Click
Empty file added tests/test_allure.py
Empty file.
@@ -3,7 +3,7 @@
from .test_utils import CliTestCase, TEST_DATA_DIR


class TestReportsTestCase(CliTestCase):
class CmdMergeReportsTestCase(CliTestCase):

def test_merge_reports(self):
with self._isolate():
20 changes: 20 additions & 0 deletions tests/test_cmd_test_reports.py
@@ -0,0 +1,20 @@
import os

from .test_utils import CliTestCase, TEST_DATA_DIR


class CmdTestReportsTestCase(CliTestCase):

def test_build_reports(self):
with self._isolate():
json_path = os.path.join(TEST_DATA_DIR, "issue381.json")
self._check_exit_code(["test_reports", json_path], exit_code=0)

def test_allure(self):
with self._isolate() as f:
json_path = os.path.join(TEST_DATA_DIR, "issue381.json")
results_path = os.path.join(f, "allure_results")
self._check_exit_code(["test_reports", "--test_output_allure", results_path, json_path], exit_code=0)
assert os.path.exists(results_path)
assert os.path.isdir(results_path)
assert len(os.listdir(results_path))
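
To see what the directory assertions above imply at the content level, a small hedged sketch that lists the generated result files (assuming Allure's file logger follows its usual naming convention of one <uuid>-result.json per test case):

import glob
import json

for result_path in sorted(glob.glob("allure_results/*-result.json")):
    with open(result_path) as fh:
        result = json.load(fh)
    # Each result carries at least a name and a status (passed/failed/broken/skipped).
    print(result.get("name"), result.get("status"))
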
11 changes: 0 additions & 11 deletions tests/test_test_report.py

This file was deleted.