Move the contents of the "Notes on specific metrics" and "Notes on specific sources" from the user manual to the reference manual (#4528)

* Move the contents of the "Notes on specific metrics" and "Notes on specific sources" from the user manual to the reference manual. Closes #4446.
fniessink committed Sep 8, 2022
1 parent c87f094 commit 0a2a6ec
Showing 14 changed files with 215 additions and 178 deletions.
@@ -49,10 +49,11 @@ class Metric(DescribedModel):
near_target: str = "10"
sources: list[str] = Field(..., min_items=1)
tags: list[Tag] = []
rationale: Optional[str] = ""
rationale: str = "" # Answers the question "Why measure this metric?", included in documentation and UI
rationale_urls: list[str] = []
explanation: Optional[str] = ""
explanation: Optional[str] = "" # Optional explanation of concepts in text format, included in documentation and UI
explanation_urls: list[str] = []
documentation: Optional[str] = "" # Optional documentation in Markdown format, only included in the documentation

@validator("default_scale", always=True)
def set_default_scale(cls, default_scale, values): # pylint: disable=no-self-argument
@@ -79,7 +79,7 @@ def check_logos(cls, values):
def check_urls(cls, values):
"""Check that all sources, except the ones specified below, have a URL."""
for source_type, source in values.items():
if source_type not in ("calendar", "manual_number") and not source.url:
if source_type not in ("calendar", "generic_json", "manual_number") and not source.url:
raise ValueError(f"Source {source_type} has no URL")

@classmethod
89 changes: 89 additions & 0 deletions components/shared_data_model/src/shared_data_model/metrics.py
@@ -176,6 +176,11 @@
description="The amount of merge requests.",
rationale="Merge requests need to be reviewed and approved. This metric allows for measuring the number of "
"merge requests without the required approvals.",
documentation="""In itself, the number of merge requests is not indicative of software quality. However, by
setting the parameter "Minimum number of upvotes", the metric can report on merge requests that have fewer than the
minimum number of upvotes. The parameter "Merge request state" can be used to exclude closed merge requests, for
example. The parameter "Target branches to include" can be used to further limit the merge requests to only count merge
requests that target specific branches, for example the "develop" branch.""",
scales=["count", "percentage"],
unit=Unit.MERGE_REQUESTS,
sources=["azure_devops", "gitlab", "manual_number"],
@@ -187,6 +192,30 @@
rationale="Use this metric to monitor other quality reports. For example, count the number of metrics that "
"don't meet their target value, or count the number of metrics that have been marked as technical debt for "
"more than two months.",
documentation="""After adding *Quality-time* as a source to a "Metrics"-metric, one can configure which
statuses to count and which metrics to consider by filtering on report names or identifiers, on metric types, on source
types, and on tags.
```{image} screenshots/editing_quality_time_source.png
:alt: Screenshot of dialog to edit *Quality-time* source showing fields for source type, source name, and source \
parameters such as URL, metric statuses, report names, and metric types
:class: only-light
```
```{image} screenshots/editing_quality_time_source_dark.png
:alt: Screenshot of dialog to edit *Quality-time* source showing fields for source type, source name, and source \
parameters such as URL, metric statuses, report names, and metric types
:class: only-dark
```
```{note}
If the "Metrics" metric is itself part of the set of metrics it counts, a peculiar situation may occur: when you've
configured the "Metrics" to count red metrics and its target is not met, the metric itself will become red and thus be
counted as well. For example, if the target is at most five red metrics, and the number of red metrics increases from
five to six, the "Metrics" value will go from five to seven. You can prevent this by making sure the "Metrics" metric is
not in the set of counted metrics, for example by putting it in a different report and only count metrics in the other
report(s).
```""",
scales=["count", "percentage"],
unit=Unit.METRICS,
near_target="5",
@@ -417,6 +446,58 @@
description="The amount of test cases.",
rationale="Track the test results of test cases so there is traceability from the test cases, "
"defined in Jira, to the test results in test reports produced by tools such as Robot Framework or Junit.",
documentation="""The test cases metric reports on the number of test cases, and their test results. The
test case metric is different than other metrics because it combines data from two types of sources: it needs one or
more sources for the test cases, and one or more sources for the test results. The test case metric then matches the
test results with the test cases.
Currently, only {index}`Jira` is supported as source for the test cases. {index}`JUnit`, {index}`TestNG`, and
{index}`Robot Framework` are supported as source for the test results. So, to configure the test cases metric, you need
to add at least one Jira source and one JUnit, TestNG, Robot Framework source. In addition, to allow the test case
metric to match test cases from Jira with test results from the JUnit, TestNG, or Robot Framework XML files, the test
results should mention Jira issue keys in their title or description.
Suppose you have configured Jira with the query: `project = "My Project" and type = "Logical Test Case"` and this
results in these test cases:
| Key | Summary |
|------|-------------|
| MP-1 | Test case 1 |
| MP-2 | Test case 2 |
| MP-3 | Test case 3 |
| MP-4 | Test case 4 |
Also suppose your JUnit XML has the following test results:
```xml
<testsuite tests="5" errors="0" failures="1" skipped="1">
<testcase name="MP-1; step 1">
<failure />
</testcase>
<testcase name="MP-1; step 2">
<skipped />
</testcase>
<testcase name="MP-2">
<skipped />
</testcase>
<testcase name="MP-3; step 1"/>
<testcase name="MP-3; step 2"/>
</testsuite>
```
The test case metric will combine the JUnit XML file with the test cases from Jira and report one failed, one skipped,
one passed, and one untested test case:
| Key | Summary | Test result |
|------|-------------|-------------|
| MP-1 | Test case 1 | failed |
| MP-2 | Test case 2 | skipped |
| MP-3 | Test case 3 | passed |
| MP-4 | Test case 4 | untested |
If multiple test results in the JUnit, TestNG, or Robot Framework XML file map to one Jira test case (as with MP-1 and
MP-3 above), the 'worst' test result is reported. Possible test results from worst to best are: errored, failed,
skipped, and passed. Test cases not found in the test results are listed as untested (as with MP-4 above).""",
scales=["count", "percentage"],
unit=Unit.TEST_CASES,
direction=Direction.MORE_IS_BETTER,
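As an aside (not part of this commit), the matching rule described in the documentation above can be sketched in a few lines of Python; this assumes test results are available as (name, result) pairs and that Jira issue keys such as "MP-1" appear in the test names:

```python
import re

# Possible test results ordered from worst to best, as described above.
RESULT_ORDER = ["errored", "failed", "skipped", "passed", "untested"]

def match_test_cases(test_case_keys, test_results):
    """Map each Jira test case key to the worst result of the test results that mention it."""
    results = {key: "untested" for key in test_case_keys}
    for name, result in test_results:
        # Look for Jira issue keys such as "MP-1" in the test name.
        for key in re.findall(r"[A-Z][A-Z0-9]*-\d+", name):
            if key in results and RESULT_ORDER.index(result) < RESULT_ORDER.index(results[key]):
                results[key] = result  # keep the worst result seen so far
    return results

print(match_test_cases(
    ["MP-1", "MP-2", "MP-3", "MP-4"],
    [("MP-1; step 1", "failed"), ("MP-1; step 2", "skipped"), ("MP-2", "skipped"),
     ("MP-3; step 1", "passed"), ("MP-3; step 2", "passed")]))
# {'MP-1': 'failed', 'MP-2': 'skipped', 'MP-3': 'passed', 'MP-4': 'untested'}
```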
@@ -505,6 +586,14 @@
rationale="It is strange if branches have had no activity for a while and have not been merged to the "
"default branch. Maybe commits have been cherry picked, or maybe the work has been postponed, but it "
"also sometimes happen that someone simply forgets to merge the branch.",
documentation="""To change how soon *Quality-time* should consider branches to be inactive, use the
parameter "Number of days since last commit after which to consider branches inactive".
Which branch is the default branch is configured in GitLab or Azure DevOps. If you want to use a different branch
as the default branch, you need to configure this in the source; see the documentation for
[GitLab](https://docs.gitlab.com/ee/user/project/repository/branches/default.html) or
[Azure DevOps](https://docs.microsoft.com/en-us/azure/devops/repos/git/manage-your-branches?view=azure-devops#\
change-your-default-branch).""",
unit=Unit.BRANCHES,
near_target="5",
sources=["azure_devops", "gitlab", "manual_number"],
@@ -6,7 +6,7 @@
from ..meta.source import Source
from ..parameters import access_parameters, Severities, URL

from .jenkins import jenkins_access_parameters
from .jenkins import jenkins_access_parameters, JENKINS_TOKEN_DOCS


ALL_ANCHORE_METRICS = ["security_warnings", "source_up_to_dateness"]
@@ -52,6 +52,7 @@
ANCHORE_JENKINS_PLUGIN = Source(
name="Anchore Jenkins plugin",
description="A Jenkins job with an Anchore report produced by the Anchore Jenkins plugin.",
documentation=dict(security_warnings=JENKINS_TOKEN_DOCS, source_up_to_dateness=JENKINS_TOKEN_DOCS),
url="https://plugins.jenkins.io/anchore-container-scanner/",
parameters=dict(
severities=SEVERITIES,
@@ -3,7 +3,8 @@
from ..meta.source import Source
from ..parameters import access_parameters

from .jenkins import jenkins_access_parameters
from .jenkins import JENKINS_TOKEN_DOCS, jenkins_access_parameters


COBERTURA = Source(
name="Cobertura",
@@ -25,6 +26,11 @@
name="Cobertura Jenkins plugin",
description="Jenkins plugin for Cobertura, a free Java tool that calculates the percentage of code accessed "
"by tests.",
documentation=dict(
source_up_to_dateness=JENKINS_TOKEN_DOCS,
uncovered_branches=JENKINS_TOKEN_DOCS,
uncovered_lines=JENKINS_TOKEN_DOCS,
),
url="https://plugins.jenkins.io/cobertura/",
parameters=jenkins_access_parameters(
["source_up_to_dateness", "uncovered_branches", "uncovered_lines"],
@@ -5,10 +5,40 @@
from ..parameters import access_parameters, Severities


DOCUMENTATION = """In some cases, there are security vulnerabilities not found by automated tools.
Quality-time has the ability to parse security warnings from JSON files with a generic format.
The JSON format consists of an object with one key `vulnerabilities`. The value should be a list of vulnerabilities.
Each vulnerability is an object with three keys: `title`, `description`, and `severity`. The title and description
values should be strings. The severity is also a string and can be either `low`, `medium`, or `high`.
Example generic JSON file:
```json
{
"vulnerabilities": [
{
"title": "ISO27001:2013 A9 Insufficient Access Control",
"description": "The Application does not enforce Two-Factor Authentication and therefore not satisfy \
security best practices.",
"severity": "high"
},
{
"title": "Threat Model Finding: Uploading Malicious of Malicious files",
"description": "An attacker can upload malicious files with low privileges can perform direct API calls \
and perform unwanted mutations or see unauthorized information.",
"severity": "medium"
}
]
}
```
"""


GENERIC_JSON = Source(
name="JSON file with security warnings",
description="A generic vulnerability report with security warnings in JSON format.",
url="https://quality-time.readthedocs.io/en/latest/usage.html#generic-json-format-for-security-warnings",
documentation=dict(generic=DOCUMENTATION),
parameters=dict(
severities=Severities(values=["low", "medium", "high"]),
**access_parameters(
@@ -49,6 +49,40 @@
description="GitLab provides Git-repositories, wiki's, issue-tracking and continuous integration/continuous "
"deployment pipelines.",
url="https://about.gitlab.com/",
documentation=dict(
generic="""```{note}
Some metric sources are documents in JSON, XML, CSV, or HTML format. Examples include JUnit XML reports, JaCoCo XML
reports and Axe CSV reports. Usually, you add a JUnit (or JaCoCo, or Axe...) source and then simply configure the same
URL that you use to access the document via the browser. Unfortunately, this does not work if the document is stored in
GitLab. In that case, you still use the JUnit (or JaCoCo, or Axe...) source, but provide a GitLab API URL as URL.
Depending on where the document is stored in GitLab, there are two scenarios: either the source is a build artifact of a
GitLab CI pipeline, or the source is stored in a GitLab repository:
1. When the metric source is a build artifact of a GitLab CI pipeline, use [URLs of the following format](https://docs.\
gitlab.com/ee/api/job_artifacts.html#download-a-single-artifact-file-from-specific-tag-or-branch):
`https://<gitlab-server>/api/v4/projects/<project-id>/jobs/artifacts/<branch>/raw/<path>/<to>/<file-name>?\
job=<job-name>`
The project id can be found under the
[project's general settings](https://docs.gitlab.com/ee/user/project/settings/).
If the repository is private, you also need to enter a [personal access token](https://docs.gitlab.com/ee/user/\
profile/personal_access_tokens.html) with the scope `read_api` in the private token field.
2. When the metric source is a file stored in a GitLab repository, use [URLs of the following format](https://docs.\
gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository):
`https://<gitlab-server>/api/v4/projects/<project-id>/repository/files/<file-path-with-slashes-%2F-encoded>/raw?\
ref=<branch>`
The project id can be found under the
[project's general settings](https://docs.gitlab.com/ee/user/project/settings/).
If the repository is private, you also need to enter a [personal access token](https://docs.gitlab.com/ee/user/\
profile/personal_access_tokens.html) with the scope `read_repository` in the private token field.
```"""
),
parameters=dict(
url=URL(
name="GitLab instance URL",
@@ -3,7 +3,7 @@
from ..meta.source import Source
from ..parameters import access_parameters

from .jenkins import jenkins_access_parameters
from .jenkins import jenkins_access_parameters, JENKINS_TOKEN_DOCS


ALL_JACOCO_METRICS = ["source_up_to_dateness", "uncovered_branches", "uncovered_lines"]
@@ -18,6 +18,11 @@
JACOCO_JENKINS_PLUGIN = Source(
name="JaCoCo Jenkins plugin",
description="A Jenkins job with a JaCoCo coverage report produced by the JaCoCo Jenkins plugin.",
documentation=dict(
source_up_to_dateness=JENKINS_TOKEN_DOCS,
uncovered_branches=JENKINS_TOKEN_DOCS,
uncovered_lines=JENKINS_TOKEN_DOCS,
),
url="https://plugins.jenkins.io/jacoco",
parameters=jenkins_access_parameters(
ALL_JACOCO_METRICS,
@@ -24,6 +24,15 @@ def jenkins_access_parameters(*args, **kwargs):
return access_parameters(*args, **kwargs)


# Put the Jenkins token documentation in a temporary variable that doesn't trigger a security warning so we can
# suppress the false positive Bandit warning below.
_TMP_DOC = """To authorize *Quality-time* for (non-public resources in) Jenkins, you can either use a username
and password or a username and
[API token](https://www.jenkins.io/doc/book/system-administration/authenticating-scripted-clients/). Note that, unlike
other sources, when using the API token Jenkins also requires the username to which the token
belongs."""
JENKINS_TOKEN_DOCS = _TMP_DOC # nosec hardcoded_password_string
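A minimal sketch (not part of this commit) of what this means for a scripted client, using the `requests` package; the server URL, user name, and token are placeholders:

```python
import requests

JENKINS_URL = "https://jenkins.example.org"  # placeholder Jenkins server
USERNAME = "quality-time"                    # the user the API token belongs to
API_TOKEN = "changeme"                       # API token generated on that user's configuration page

# Unlike some other sources, Jenkins needs the username *and* the API token, passed as basic auth.
response = requests.get(f"{JENKINS_URL}/api/json", auth=(USERNAME, API_TOKEN), timeout=10)
response.raise_for_status()
print([job["name"] for job in response.json().get("jobs", [])])
```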

ALL_JENKINS_METRICS = ["failed_jobs", "source_up_to_dateness", "source_version", "unused_jobs"]

JOB_ENTITY = dict(
@@ -42,6 +51,12 @@ def jenkins_access_parameters(*args, **kwargs):
JENKINS = Source(
name="Jenkins",
description="Jenkins is an open source continuous integration/continuous deployment server.",
documentation=dict(
unused_jobs=JENKINS_TOKEN_DOCS,
failed_jobs=JENKINS_TOKEN_DOCS,
source_up_to_dateness=JENKINS_TOKEN_DOCS,
source_version=JENKINS_TOKEN_DOCS,
),
url="https://www.jenkins.io/",
parameters=dict(
inactive_days=Days(
@@ -103,6 +118,9 @@ def jenkins_access_parameters(*args, **kwargs):
JENKINS_TEST_REPORT = Source(
name="Jenkins test report",
description="A Jenkins job with test results.",
documentation=dict(
test_cases=JENKINS_TOKEN_DOCS, tests=JENKINS_TOKEN_DOCS, source_up_to_dateness=JENKINS_TOKEN_DOCS
),
url="https://plugins.jenkins.io/junit",
parameters=dict(
test_result=TestResult(values=["failed", "passed", "skipped"]),
@@ -8,6 +8,13 @@
MANUAL_NUMBER = Source(
name="Manual number",
description="A number entered manually by a Quality-time user.",
documentation=dict(
generic="""The manual number source supports all metric types that take a number as value.
Because users have to keep the value up to date by hand, this source is only meant to be used as a temporary
solution for when no automated source is available yet. For example, when the results of a security audit are only
available in a PDF-report, a 'security warnings' metric can be added with the number of findings as manual number
source."""
),
parameters=dict(
number=IntegerParameter(
name="Number",
@@ -4,7 +4,7 @@
from ..meta.source import Source
from ..parameters import access_parameters, TestResult

from .jenkins import jenkins_access_parameters
from .jenkins import JENKINS_TOKEN_DOCS, jenkins_access_parameters


ALL_ROBOT_FRAMEWORK_METRICS = ["source_up_to_dateness", "source_version", "test_cases", "tests"]
@@ -35,6 +35,7 @@
name="Robot Framework Jenkins plugin",
description="A Jenkins plugin for Robot Framework, a generic open source automation framework for acceptance "
"testing, acceptance test driven development, and robotic process automation.",
documentation=dict(source_up_to_dateness=JENKINS_TOKEN_DOCS, tests=JENKINS_TOKEN_DOCS),
url="https://plugins.jenkins.io/robot/",
parameters=dict(
test_result=TestResult(values=["fail", "pass"], api_values={"fail": "overallFailed", "pass": "overallPassed"}),
