diff --git a/clair/docinfo.xml b/clair/docinfo.xml
new file mode 100644
index 000000000..457153dd4
--- /dev/null
+++ b/clair/docinfo.xml
@@ -0,0 +1,10 @@
+{productname}
+{producty}
+Vulnerability reporting with Clair on {productname}
+
+ Get started with {productname}
+
+
+ Red Hat OpenShift Documentation Team
+
+
diff --git a/clair/master.adoc b/clair/master.adoc
new file mode 100644
index 000000000..15ad9aeb1
--- /dev/null
+++ b/clair/master.adoc
@@ -0,0 +1,70 @@
+:_content-type: ASSEMBLY
+
+include::modules/attributes.adoc[]
+
+[id="vulnerability-reporting-clair-quay-preface"]
+= Vulnerability reporting with Clair on {productname} preface
+
+The contents within this guide provide an overview of Clair for {productname}, running Clair on standalone {productname} and Operator deployments, and advanced Clair configuration.
+
+[id="vulnerability-reporting-clair-quay-overview"]
+= Vulnerability reporting with Clair on {productname} overview
+
+The content in this guide explains the key purposes and concepts of Clair on {productname}. It also contains information about Clair releases and the location of official Clair containers.
+
+include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1]
+include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2]
+include::modules/clair-concepts.adoc[leveloffset=+1]
+// include::modules/internal-api.adoc[leveloffset=+2]
+include::modules/clair-authentication.adoc[leveloffset=+2]
+//include::modules/testing-clair.adoc[leveloffset=+1]
+include::modules/clair-updaters.adoc[leveloffset=+2]
+include::modules/clair-updater-urls.adoc[leveloffset=+3]
+include::modules/about-clair.adoc[leveloffset=+1]
+include::modules/clair-cve.adoc[leveloffset=+2]
+include::modules/fips-overview.adoc[leveloffset=+2]
+
+[id="testing-clair-with-quay"]
+= Clair on {productname}
+
+This guide contains procedures for running Clair on {productname} in both standalone and {ocp} Operator deployments.
+
+include::modules/clair-standalone-configure.adoc[leveloffset=+1]
+
+include::modules/clair-openshift.adoc[leveloffset=+1]
+// include::modules/clair-openshift-manual.adoc[leveloffset=+2]
+
+include::modules/clair-testing.adoc[leveloffset=+1]
+
+
+[id="advanced-clair-configuration"]
+= Advanced Clair configuration
+
+Use this section to configure advanced Clair features.
+
+include::modules/clair-unmanaged.adoc[leveloffset=+1]
+include::modules/unmanaging-clair-database.adoc[leveloffset=+2]
+include::modules/configuring-custom-clair-database.adoc[leveloffset=+2]
+
+include::modules/custom-clair-configuration-managed-database.adoc[leveloffset=+1]
+include::modules/managed-clair-database.adoc[leveloffset=+2]
+include::modules/configuring-custom-clair-database-managed.adoc[leveloffset=+2]
+
+include::modules/clair-disconnected.adoc[leveloffset=+1]
+
+
+include::modules/clair-clairctl.adoc[leveloffset=+2]
+include::modules/clair-openshift-config.adoc[leveloffset=+3]
+include::modules/clair-export-bundle.adoc[leveloffset=+3]
+include::modules/clair-openshift-airgap-database.adoc[leveloffset=+3]
+include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+3]
+
+
+include::modules/clair-clairctl-standalone.adoc[leveloffset=+2]
+include::modules/clair-standalone-config-location.adoc[leveloffset=+3]
+include::modules/clair-export-bundle-standalone.adoc[leveloffset=+3]
+include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+3]
+include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+3]
+
+include::modules/clair-crda-configuration.adoc[leveloffset=+2]
+include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+2]
\ No newline at end of file
diff --git a/clair/modules b/clair/modules
new file mode 120000
index 000000000..464b823ac
--- /dev/null
+++ b/clair/modules
@@ -0,0 +1 @@
+../modules
\ No newline at end of file
diff --git a/deploy_quay_on_openshift_op_tng/master.adoc b/deploy_quay_on_openshift_op_tng/master.adoc
index f9c679214..3ce0217cd 100644
--- a/deploy_quay_on_openshift_op_tng/master.adoc
+++ b/deploy_quay_on_openshift_op_tng/master.adoc
@@ -91,7 +91,7 @@ include::modules/operator-external-access.adoc[leveloffset=+2]
include::modules/operator-console-monitoring-alerting.adoc[leveloffset=+2]
-include::modules/clair-openshift-airgap-update.adoc[leveloffset=+2]
+include::modules/clair-disconnected.adoc[leveloffset=+2]
include::modules/clair-clairctl.adoc[leveloffset=+3]
==== Retrieving the Clair config
include::modules/clair-openshift-config.adoc[leveloffset=+4]
@@ -111,6 +111,8 @@ include::modules/operator-resize-storage.adoc[leveloffset=+2]
include::modules/operator-customize-images.adoc[leveloffset=+2]
include::modules/operator-cloudfront.adoc[leveloffset=+2]
include::modules/clair-unmanaged.adoc[leveloffset=+3]
+include::modules/unmanaging-clair-database.adoc[leveloffset=+4]
+include::modules/configuring-custom-clair-database.adoc[leveloffset=+4]
include::modules/build-enhancements.adoc[leveloffset=+1]
diff --git a/manage_quay/master.adoc b/manage_quay/master.adoc
index 87ceb0ef3..97a2098fd 100644
--- a/manage_quay/master.adoc
+++ b/manage_quay/master.adoc
@@ -58,16 +58,25 @@ include::modules/proc_manage-log-storage.adoc[leveloffset=+1]
:context: security-scanning
-include::modules/clair-intro2.adoc[leveloffset=+1]
+include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1]
include::modules/clair-openshift.adoc[leveloffset=+2]
include::modules/clair-openshift-manual.adoc[leveloffset=+3]
include::modules/clair-standalone.adoc[leveloffset=+2]
+
+[id="advanced-clair-configuration"]
+= Advanced Clair configuration
+
+Use the content in this section to configure advanced Clair settings.
+
include::modules/clair-unmanaged.adoc[leveloffset=+2]
+include::modules/unmanaging-clair-database.adoc[leveloffset=+3]
include::modules/clair-crda-configuration.adoc[leveloffset=+2]
include::modules/clair-using.adoc[leveloffset=+2]
include::modules/clair-cve.adoc[leveloffset=+2]
include::modules/clair-disconnected.adoc[leveloffset=+2]
-include::modules/clair-updater-urls.adoc[leveloffset=+2]
+include::modules/configuring-clair-disconnected-environment.adoc[leveloffset=+3]
+include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+3]
+include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2]
include::modules/clair-add-info.adoc[leveloffset=+2]
diff --git a/modules/about-clair.adoc b/modules/about-clair.adoc
new file mode 100644
index 000000000..07666ae6d
--- /dev/null
+++ b/modules/about-clair.adoc
@@ -0,0 +1,130 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="about-clair"]
+= About Clair
+
+The content in this section highlights Clair releases, official Clair containers, and information about CVSS enrichment data.
+
+[id="clair-releases"]
+== Clair releases
+
+New versions of Clair are regularly released. The source code needed to build Clair is packaged as an archive and attached to each release. Clair releases can be found at link:https://github.com/quay/clair/releases[Clair releases].
+
+
+Release artifacts also include the `clairctl` command line interface tool, which obtains updater data from the internet by using an open host.
+
+[id="clair-supported-languages"]
+== Clair supported languages
+
+Clair supports the following languages:
+* Python
+* Java (CRDA must be enabled)
+
+[id="clair-containers"]
+== Clair containers
+
+Official downstream Clair containers bundled with {productname} can be found on the link:registry.redhat.io[Red Hat Ecosystem Catalog].
+
+Official upstream containers are packaged and released as a container at link:quay.io/projectquay/clair[Quay.io/projectquay/clair]. The latest tag tracks the Git development branch. Version tags are built from the corresponding release.
+
+////
+
+[id="notifier-pagination"]
+===== Notifier pagination
+
+The URL returned in the callback field takes the client to a paginated result.
+
+The following example shows the callback endpoint specification:
+[source,json]
+----
+GET /notifier/api/v1/notification/{id}?[page_size=N][next=N]
+{
+ page: {
+ size: int,
+ next: string, // if present, the next id to fetch.
+ }
+ notifications: [ Notification… ] // array of notifications; max len == page.size
+}
+----
+.small
+--
+* The `GET` callback request implements a simple paging mechanism.
+* A `page` object accompanying the notification list specifies `next` and `size` fields.
+* The `next` field returned in the page must be provided as the subsequent request's `next` URL parameter to retrieve the next set of notifications.
+* The `size` field will echo back to the request `page_size` parameter.
+
+
+
+* The `page_size` URL parameter controls how many notifications are returned in a single page. If unprovided, a default of `500` is used.
+* The `next` URL parameter informs Clair of the next set of paginated notifications to return. If not provided, the `0th` page is assumed.
+*
+
+////
+
+////
+
+.Prerequisites
+
+* The Linux `make` command is required to start the local development environment.
+
+* Podman v3.0 or greater. Alternatively, you can use Docker or Docker Compose, however not all versions of Docker or Docker Compose have been tested. As a result, some versions might not work properly.
++
+This guide uses Podman with an implementation of Compose Specification.
+
+* Go v1.16 or greater.
+
+.Procedure
+
+. Enter the following command to clone the Clair GitHub repository:
++
+[source,terminal]
+----
+$ git clone git@github.com:quay/clair.git
+----
+
+. Change into the Clair directory by entering the following command:
++
+[source,terminal]
+----
+$ cd clair
+----
+
+. Start the Clair container by entering the following command:
++
+[source,terminal]
+----
+$ podman-compose up -d
+----
+
+After the local development environment starts, the following infrastructure is available to you:
+
+* `localhost:8080`. This includes dashboards and debugging services. You can see Traefik configuration logs in `local-dev/traefik`, where various services are served.
+
+* `localhost:6060`. This includes Clair services.
+
+* {productname}. If started, {productname} will be started in a single node, local storage configuration. A random port will be forwarded from `localhost`. Use `podman port` to view mapping information.
+
+* PostgreSQL. PostgreSQL has a random port forwarded from `localhost` to the database server. See `local-dev/clair/init.sql` for credentials and permissions. Use `podman port` to view mapping information.
+
+[id="testing-clair"]
+== Testing Clair on the local development environment
+
+After starting the Clair container, a {productname} server is forwarded to a random port on the host.
+
+. Locate, and open, the port hosting {productname}.
+
+. Click *Create Account* and create a new user, for example, `admin`.
+
+. Set a password.
+
+. To push to the {productname} container, you must exec into the skopeo container. For example:
++
+[source,terminal]
+----
+$ podman exec -it quay-skopeo /usr/bin/skopeo copy --dst-creds ':' --dst-tls-verify=false clair-quay:8080//:
+----
+
+////
\ No newline at end of file
diff --git a/modules/clair-airgap.adoc b/modules/clair-airgap.adoc
new file mode 100644
index 000000000..553b79a7e
--- /dev/null
+++ b/modules/clair-airgap.adoc
@@ -0,0 +1,5 @@
+:_content-type: CONCEPT
+[id="clair-airgap"]
+== Air gapped Clair
+
+For flexibility, Clair supports running updaters in a separate environment and importing the results. This is aimed at supporting installations that reject the Clair cluster from communication with the internet directly.
diff --git a/modules/clair-authentication.adoc b/modules/clair-authentication.adoc
new file mode 100644
index 000000000..a2f3a6276
--- /dev/null
+++ b/modules/clair-authentication.adoc
@@ -0,0 +1,31 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-authentication"]
+= Clair authentication
+
+In its current iteration, Clair v4 (Clair) handles authentication internally.
+
+[NOTE]
+====
+Previous versions of Clair used JWT Proxy to gate authentication.
+====
+
+Authentication is configured by specifying configuration objects underneath the `auth` key of the configuration. Multiple authentication configurations might be present, but they are used preferentially in the following order:
+
+. PSK. With this authentication configuration, Clair implements JWT-based authentication using a pre-shared key.
+
+. Configuration. For example:
++
+[source,yaml]
+----
+auth:
+ psk:
+ key: >-
+ MDQ4ODBlNDAtNDc0ZC00MWUxLThhMzAtOTk0MzEwMGQwYTMxCg==
+ iss: 'issuer'
+----
++
+In this configuration the `auth` field requires two parameters: `iss`, which is the issuer to validate all incoming requests, and `key`, which is a base64 coded symmetric key for validating the requests.
\ No newline at end of file
diff --git a/modules/clair-clairctl-standalone.adoc b/modules/clair-clairctl-standalone.adoc
new file mode 100644
index 000000000..501e5057f
--- /dev/null
+++ b/modules/clair-clairctl-standalone.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-disconnected-standalone-configuration"]
+= Setting up a self-managed deployment of Clair for a disconnected {ocp} cluster
+
+Use the following procedures to set up a self-managed deployment of Clair for a disconnected {ocp} cluster.
+
+[id="clair-clairctl-standalone"]
+== Installing the clairctl command line utility tool for a self-managed Clair deployment on {ocp}
+
+Use the following procedure to install the `clairctl` CLI tool for self-managed Clair deployments on {ocp}.
+
+.Procedure
+
+. Install the `clairctl` program for a self-managed Clair deployment by using the `podman cp` command, for example:
++
+[source,terminal]
+----
+$ sudo podman cp clairv4:/usr/bin/clairctl ./clairctl
+----
+
+. Set the permissions of the `clairctl` file so that it can be executed and run by the user, for example:
++
+[source,terminal]
+----
+$ chmod u+x ./clairctl
+----
\ No newline at end of file
diff --git a/modules/clair-clairctl.adoc b/modules/clair-clairctl.adoc
index 3c2770417..8382a3fa9 100644
--- a/modules/clair-clairctl.adoc
+++ b/modules/clair-clairctl.adoc
@@ -1,17 +1,35 @@
-[[clair-clairctl]]
-= Obtaining clairctl
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-To obtain the `clairctl` program from a Clair deployment in an OpenShift cluster, use the `oc cp` command, for example:
+:_content-type: PROCEDURE
+[id="clair-disconnected-ocp-configuration"]
+= Setting up Clair in a disconnected {ocp} cluster
-----
-$ oc -n quay-enterprise cp example-registry-clair-app-64dd48f866-6ptgw:/usr/bin/clairctl ./clairctl
-$ chmod u+x ./clairctl
-----
+Use the following procedures to set up an {ocp} provisioned Clair pod in a disconnected {ocp} cluster.
+
+[id="clair-clairctl-ocp"]
+== Installing the clairctl command line utility tool for {ocp} deployments
-For a standalone Clair deployment, use the `podman cp` command, for example:
+Use the following procedure to install the `clairctl` CLI tool for {ocp} deployments.
+.Procedure
+
+. Install the `clairctl` program for a Clair deployment in an {ocp} cluster by using the `oc cp` command, for example:
++
+[source,terminal]
----
-$ sudo podman cp clairv4:/usr/bin/clairctl ./clairctl
-$ chmod u+x ./clairctl
+$ oc -n quay-enterprise exec example-registry-clair-app-64dd48f866-6ptgw -- cat /usr/bin/clairctl > clairctl
----
++
+[NOTE]
+====
+Unofficially, the `clairctl` tool can be downloaded as a release artifact from the link:https://github.com/quay/clair/releases[Clair releases] page.
+====
+. Set the permissions of the `clairctl` file so that it can be executed and run by the user, for example:
++
+[source,terminal]
+----
+$ chmod u+x ./clairctl
+----
\ No newline at end of file
diff --git a/modules/clair-concepts.adoc b/modules/clair-concepts.adoc
new file mode 100644
index 000000000..835535f77
--- /dev/null
+++ b/modules/clair-concepts.adoc
@@ -0,0 +1,153 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-concepts"]
+= Clair concepts
+
+The following sections provide a conceptual overview of how Clair works.
+
+[id="clair-practice"]
+== Clair in practice
+
+A Clair analysis is broken down into three distinct parts: indexing, matching, and notification.
+
+[id="clair-indexing-concept"]
+=== Indexing
+
+Clair's indexer service is responsible for indexing a manifest. In Clair, manifests are representations of a container image. The indexer service is the component that Clair uses to understand the contents of layers. Clair leverages the fact that Open Container Initiative (OCI) manifests and layers are content addressed to reduce duplicate work.
+
+Indexing involves taking a manifest representing a container image and computing its constituent parts. The indexer tries to discover what packages exist in the image, what distribution the image is derived from, and what package repositories are used within the image. When this information is computed, it is persisted into an `IndexReport`.
+
+The `IndexReport` is stored in Clair's database. It can be fed to a `matcher` node to compute the vulnerability report.
+
+[id="content-addressability"]
+==== Content addressability
+
+Clair treats all manifests and layers as _content addressable_. In the context of Clair, content addressable means that when a specific manifest is indexed, it is not indexed again unless it is required; this is the same for individual layers.
+
+For example, consider how many images in a registry might use `ubuntu:artful` as a base layer. If the developers prefer basing their images off of Ubuntu, it could be a large majority of images. Treating the layers and manifests as content addressable means that Clair only fetches and analyzes the base layer one time.
+
+In some cases, Clair should re-index a manifest. For example, when an internal component such as a package scanner is updated, Clair performs the analysis with the new package scanner. Clair has enough information to determine that a component has changed and that the `IndexReport` might be different the second time, and as a result it re-indexes the manifest.
+
+A client can track Clair's `index_state` endpoint to understand when an internal component has changed, and can subsequently issue re-indexes. See the Clair API guide to learn how to view Clair's API specification.
+
+[id="clair-matching-concept"]
+=== Matching
+
+With Clair, a matcher node is responsible for matching vulnerabilities to a provided `IndexReport`.
+
+Matchers are responsible for keeping the database of vulnerabilities up to date. Matchers will typically run a set of updaters, which periodically probe their data sources for new content. New vulnerabilities are stored in the database when they are discovered.
+
+The matcher API is designed to be used often. It is designed to always provide the most recent `VulnerabilityReport` when queried. The `VulnerabilityReport` summarizes both a manifest's content and any vulnerabilities affecting the content.
+
+// See. . . to learn more about how to view the Clair API specification and to work with the matcher API.
+
+[id="remote-matching"]
+==== Remote matching
+
+A remote matcher acts similarly to a matcher, however remote matchers use API calls to fetch vulnerability data for a provided `IndexReport`. Remote matchers are useful when it is impossible to persist data from a given source into the database.
+
+The CRDA remote matcher is responsible for fetching vulnerabilities from Red Hat Code Ready Dependency Analytics (CRDA). By default, this matcher serves 100 requests per minute. The rate limiting can be lifted by requesting a dedicated API key, which is done by submitting link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form].
+
+To enable CRDA remote matching, see "Enabling CRDA for Clair".
+
+[id="clair-notifications-concept"]
+=== Notifications
+
+Clair uses a notifier service that keeps track of new security database updates and informs users if new or removed vulnerabilities affect an indexed manifest.
+
+When the notifier becomes aware of new vulnerabilities affecting a previously indexed manifest, it uses the configured methods in your `config.yaml` file to issue notifications about the new changes. Returned notifications express the most severe vulnerability discovered because of the change. This avoids creating excessive notifications for the same security database update.
+
+When a user receives a notification, they can issue a new request against the matcher to receive an up-to-date vulnerability report.
+
+The notification schema is the JSON marshalled form of the following types:
+
+[source,json]
+----
+// Reason indicates the catalyst for a notification
+type Reason string
+const (
+ Added Reason = "added"
+ Removed Reason = "removed"
+ Changed Reason = "changed"
+)
+type Notification struct {
+ ID uuid.UUID `json:"id"`
+ Manifest claircore.Digest `json:"manifest"`
+ Reason Reason `json:"reason"`
+ Vulnerability VulnSummary `json:"vulnerability"`
+}
+type VulnSummary struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Package *claircore.Package `json:"package,omitempty"`
+ Distribution *claircore.Distribution `json:"distribution,omitempty"`
+ Repo *claircore.Repository `json:"repo,omitempty"`
+ Severity string `json:"severity"`
+ FixedInVersion string `json:"fixed_in_version"`
+ Links string `json:"links"`
+}
+----
+
+You can subscribe to notifications through the following mechanics:
+
+* Webhook delivery
+* AMQP delivery
+* STOMP delivery
+
+Configuring the notifier is done through the Clair YAML configuration file.
+
+[id=webhook-delivery]
+==== Webhook delivery
+
+When you configure the notifier for webhook delivery, you provide the service with the following pieces of information:
+
+* A target URL where the webhook will fire.
+* The callback URL where the notifier might be reached, including its API path. For example, `http://clair-notifier/notifier/api/v1/notifications`.
+
+When the notifier has determined that an updated security database has changed the affected status of an indexed manifest, it delivers the following JSON body to the configured target:
+
+[source,json]
+----
+{
+  "notification_id": {uuid_string},
+ "callback": {url_to_notifications}
+}
+----
+
+On receipt, the server can browse to the URL provided in the callback field.
+
+[id="amqp-delivery"]
+==== AMQP delivery
+
+The Clair notifier also supports delivering notifications to an AMQP broker. With AMQP delivery, you can control whether a callback is delivered to the broker or whether notifications are directly delivered to the queue. This allows the developer of the AMQP consumer to determine the logic of notification processing.
+
+[NOTE]
+====
+AMQP delivery only supports AMQP 0.x protocol (for example, RabbitMQ). If you need to publish notifications to AMQP 1.x message queue (for example, ActiveMQ), you can use STOMP delivery.
+====
+
+[id="amqp-direct-delivery"]
+===== AMQP direct delivery
+
+If the Clair notifier's configuration specifies `direct: true` for AMQP delivery, notifications are delivered directly to the configured exchange.
+
+When `direct` is set, the `rollup` property might be set to instruct the notifier to send a maximum number of notifications in a single AMQP message. This provides balance between the size of the message and the number of messages delivered to the queue.
+
+[id="notifier-testing-development"]
+==== Notifier testing and development mode
+
+The notifier has a testing and development mode that can be enabled with the `NOTIFIER_TEST_MODE` parameter. This parameter can be set to any value.
+
+When the `NOTIFIER_TEST_MODE` parameter is set, the notifier begins sending fake notifications to the configured delivery mechanism every `poll_interval` interval. This provides an easy way to implement and test new or existing deliverers.
+
+The notifier runs in `NOTIFIER_TEST_MODE` until the environment variable is cleared and the service is restarted.
+
+[id="deleting-notifications"]
+==== Deleting notifications
+
+To delete the notification, you can use the `DELETE` API call. Deleting a notification ID manually cleans up resources in the notifier. If you do not use the `DELETE` API call, the notifier waits a predetermined length of time before clearing delivered notifications from its database.
+
+// For more information on the `DELETE` API call, see. . .
\ No newline at end of file
diff --git a/modules/clair-crda-configuration.adoc b/modules/clair-crda-configuration.adoc
index c2583df22..975ca1997 100644
--- a/modules/clair-crda-configuration.adoc
+++ b/modules/clair-crda-configuration.adoc
@@ -1,10 +1,14 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
[id="clair-crda-configuration"]
-= Clair CRDA configuration
+= Enabling Clair CRDA
-[id="enabling-clair-crda"]
-== Enabling Clair CRDA
+Java scanning depends on a public, Red Hat provided API service called Code Ready Dependency Analytics (CRDA). CRDA is only available with internet access and is not enabled by default.
-Java scanning depends on a public, Red Hat provided API service called Code Ready Dependency Analytics (CRDA). CRDA is only available with internet access and is not enabled by default. Use the following procedure to integrate the CRDA service with a custom API key and enable CRDA for Java and Python scanning.
+Use the following procedure to integrate the CRDA service with a custom API key and enable CRDA for Java and Python scanning.
.Prerequisites
@@ -26,5 +30,5 @@ matchers:
source: <2>
----
+
-<1> Insert the Quay-specific CRDA remote matcher from link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form] here.
-<2> The hostname of your Quay server.
\ No newline at end of file
+<1> Insert the Quay-specific CRDA remote matcher from link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form] here.
+<2> The hostname of your Quay server.
\ No newline at end of file
diff --git a/modules/clair-cve.adoc b/modules/clair-cve.adoc
index 657154b37..5c9142bae 100644
--- a/modules/clair-cve.adoc
+++ b/modules/clair-cve.adoc
@@ -1,11 +1,14 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
:_content-type: CONCEPT
[id="clair-cve"]
= CVE ratings from the National Vulnerability Database
-With Clair v4.2, CVSS enrichment data is now viewable in the Quay UI.
-Additionally, Clair v4.2 adds CVSS scores from the National Vulnerability Database for detected vulnerabilities.
+As of Clair v4.2, Common Vulnerability Scoring System (CVSS) enrichment data is now viewable in the {productname} UI. Additionally, Clair v4.2 adds CVSS scores from the National Vulnerability Database for detected vulnerabilities.
-With this change, if the vulnerability has a CVSS score that is within 2 levels of the distribution score, the Quay UI present's the distro's score by default. For example:
+With this change, if the vulnerability has a CVSS score that is within 2 levels of the distribution score, the {productname} UI presents the distribution's score by default. For example:
image:clair-4-2-enrichment-data.png[Clair v4.2 data display]
diff --git a/modules/clair-disconnected.adoc b/modules/clair-disconnected.adoc
index 1a833c14b..4241fe660 100644
--- a/modules/clair-disconnected.adoc
+++ b/modules/clair-disconnected.adoc
@@ -1,78 +1,18 @@
-:_content-type: CONCEPT
-[id="configuring-clair-disconnected-environments"]
-= Configuring Clair for disconnected environments
-
-Clair uses a set of components called `Updaters` to handle the fetching and parsing of data from various vulnerability databases. `Updaters` are set up by default to pull vulnerability data directly from the internet and work for immediate use. For customers in disconnected environments without direct access to the internet, this poses a problem. Clair supports these environments by working with different types of update workflows that take into account network isolation. Using the `clairctl` command line utility, any process can easily fetch `Updater` data from the internet by using an open host, securely transfer the data to an isolated host, and then import the `Updater` data on the isolated host into Clair itself.
-
-[NOTE]
-====
-Currently, Clair enrichment data is CVSS data. Enrichment data is currently unsupported in disconnected environments.
-====
-
-Use the following procedure to configure Clair for a disconnected environment.
-
-.Prerequisites
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-* You have installed the `clairctl` tool to be run as a binary, or by the Clair container image.
+:_content-type: CONCEPT
+[id="clair-disconnected-environments"]
+= Clair in disconnected environments
-.Procedure
+Clair uses a set of components called _updaters_ to handle the fetching and parsing of data from various vulnerability databases. Updaters are set up by default to pull vulnerability data directly from the internet and work for immediate use. However, some users might require {productname} to run in a disconnected environment, or an environment without direct access to the internet. Clair supports disconnected environments by working with different types of update workflows that take network isolation into consideration. This works by using the `clairctl` command line interface tool, which obtains updater data from the internet by using an open host, securely transferring the data to an isolated host, and then importing the updater data on the isolated host into Clair.
-. In your `config.yaml` file, set your Clair configuration to disable `Updaters` from running:
-+
-.config.yaml
-[source,yaml]
-----
-matcher:
- disable_updaters: true
-----
+Use this guide to deploy Clair in a disconnected environment.
-. Export the latest `Updater` data to a local archive. The following command assumes that your Clair configuration is in `/etc/clairv4/config/config.yaml`
-+
-[subs="verbatim,attributes"]
-----
-$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml export-updaters /updaters/updaters.gz
-----
-+
[NOTE]
====
-You must explicitly reference the Clair configuration. This creates the `Updater` archive in `/etc/clairv4/updaters/updaters.gz`. To ensure that the archive was created without any errors from the source databases, you can use the `--strict` flag with `clairctl`. The archive file should be copied over to a volume that is accessible from the disconnected host running Clair.
+Currently, Clair enrichment data is CVSS data. Enrichment data is currently unsupported in disconnected environments.
====
-. From the disconnected host, use the following command to import the archive into Clair:
-+
-[subs="verbatim,attributes"]
-----
-$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml import-updaters /updaters/updaters.gz
-----
-
-[id="mapping-repositories-to-cpe-information"]
-== Mapping repositories to Common Product Enumeration (CPE) information
-
-Clair's {rhel} scanner relies on a Common Product Enumeration (CPE) file to properly map RPM packages to the corresponding security data to produce matching results. The CPE file must be present, or access to the file must be allowed, for the scanner to properly process RPM packages. If the file is not present, RPM packages installed in the container image will not be scanned.
-
-* The `repos2cpe` JSON mapping file is published at link:https://www.redhat.com/security/data/metrics/repository-to-cpe.json[Red Hat Repository-to-CPE JSON].
-
-* The `name2repos` JSON mapping file is published at link:https://access.redhat.com/security/data/metrics/container-name-repos-map.json[Red Hat Name-to-Repos JSON].
-
-In addition to uploading CVE information to the database for disconnected Clair, you must also make the mapping file available locally:
-
-* For standalone {productname} and Clair deployments, the mapping file must be loaded into the Clair pod.
-* For Operator-based {productname} and Clair deployments, you must set the Clair component to `unamanged`. Then, Clair must be deployed manually, setting the configuration to load a local copy of the mapping file.
-
-In addition to uploading CVE information to the database for disconnected Clair, you must also make the mapping file available locally:
-
-Use the `repo2cpe_mapping_file` field in your Clair configuration to specify the file, for example,
-
-[source,yaml]
-----
-indexer:
- scanner:
- repo:
- rhel-repository-scanner:
- repo2cpe_mapping_file: /data/cpe-map.json
- package:
- rhel_containerscanner:
- name2repos_mapping_file: /data/repo-map.json <1>
-----
-
-For more information, see link:https://www.redhat.com/en/blog/how-accurately-match-oval-security-data-installed-rpms[How to accurately match OVAL security data to installed RPMs].
\ No newline at end of file
+For more information about Clair updaters, see "Clair updaters".
\ No newline at end of file
diff --git a/modules/clair-export-bundle-standalone.adoc b/modules/clair-export-bundle-standalone.adoc
new file mode 100644
index 000000000..bacdc3636
--- /dev/null
+++ b/modules/clair-export-bundle-standalone.adoc
@@ -0,0 +1,24 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-export-bundle-standalone"]
+= Exporting the updaters bundle from a connected Clair instance
+
+Use the following procedure to export the updaters bundle from a Clair instance that has access to the internet.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+* You have deployed Clair.
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file.
+
+.Procedure
+
+* From a Clair instance that has access to the internet, use the `clairctl` CLI tool with your configuration file to export the updaters bundle. For example:
++
+[source,terminal]
+----
+$ ./clairctl --config ./config.yaml export-updaters updates.gz
+----
\ No newline at end of file
diff --git a/modules/clair-export-bundle.adoc b/modules/clair-export-bundle.adoc
index 23946906d..0c11ce7c2 100644
--- a/modules/clair-export-bundle.adoc
+++ b/modules/clair-export-bundle.adoc
@@ -1,9 +1,24 @@
-[[clair-export-bundle]]
-= Exporting the updaters bundle
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-From a Clair instance that has access to the internet, use `clairctl` with the appropriate configuration file to export the updaters bundle:
+:_content-type: PROCEDURE
+[id="clair-export-bundle"]
+= Exporting the updaters bundle from a connected Clair instance
+Use the following procedure to export the updaters bundle from a Clair instance that has access to the internet.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file.
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file.
+
+.Procedure
+
+* From a Clair instance that has access to the internet, use the `clairctl` CLI tool with your configuration file to export the updaters bundle. For example:
++
+[source,terminal]
----
$ ./clairctl --config ./config.yaml export-updaters updates.gz
-----
-
+----
\ No newline at end of file
diff --git a/modules/clair-intro2.adoc b/modules/clair-intro2.adoc
deleted file mode 100644
index 6a12a9bf2..000000000
--- a/modules/clair-intro2.adoc
+++ /dev/null
@@ -1,33 +0,0 @@
-[[clair-intro2]]
-= Clair Security Scanning
-
-Clair is a set of micro services that can be used with {productname}
-to perform vulnerability scanning of container images associated with a set of
-Linux operating systems. The micro services design of Clair makes it
-appropriate to run in a highly scalable configuration, where
-components can be scaled separately as appropriate for enterprise environments.
-
-Clair uses the following vulnerability databases to scan for issues in your images:
-
-* Alpine SecDB database
-* AWS UpdateInfo
-* Debian Oval database
-* Oracle Oval database
-* RHEL Oval database
-* SUSE Oval database
-* Ubuntu Oval database
-* Pyup.io (python) database
-
-For information on how Clair does security mapping with the different databases, see
-link:https://quay.github.io/claircore/concepts/severity_mapping.html[ClairCore Severity Mapping].
-
-[NOTE]
-====
-
-ifeval::["{productname}" == "Red Hat Quay"]
-With the release of {productname} 3.4, the new Clair V4 (image {productrepo}/{clairimage} fully replaces the prior Clair V2 (image quay.io/redhat/clair-jwt). See below for how to run V2 in read-only mode while V4 is updating.
-endif::[]
-ifeval::["{productname}" == "Project Quay"]
-With the release of Clair V4 (image clair), the previously used Clair V2 (image clair-jwt) is no longer used. See below for how to run V2 in read-only mode while V4 is updating.
-endif::[]
-====
diff --git a/modules/clair-openshift-airgap-database-standalone.adoc b/modules/clair-openshift-airgap-database-standalone.adoc
new file mode 100644
index 000000000..cfd03e0e0
--- /dev/null
+++ b/modules/clair-openshift-airgap-database-standalone.adoc
@@ -0,0 +1,63 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-openshift-airgap-database-standalone"]
+= Configuring access to the Clair database in the disconnected {ocp} cluster
+
+Use the following procedure to configure access to the Clair database in your disconnected {ocp} cluster.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+* You have deployed Clair.
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file.
+* You have exported the updaters bundle from a Clair instance that has access to the internet.
+
+.Procedure
+
+. Determine your Clair database service by using the `oc` CLI tool, for example:
++
+[source,terminal]
+----
+$ oc get svc -n quay-enterprise
+----
++
+.Example output
++
+[source,terminal]
+----
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+example-registry-clair-app ClusterIP 172.30.224.93 80/TCP,8089/TCP 4d21h
+example-registry-clair-postgres ClusterIP 172.30.246.88 5432/TCP 4d21h
+...
+----
+
+. Forward the Clair database port so that it is accessible from the local machine. For example:
++
+[source,terminal]
+----
+$ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432
+----
+
+. Update your Clair `config.yaml` file, for example:
++
+[source,yaml]
+----
+indexer:
+ connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1>
+ scanlock_retry: 10
+ layer_scan_concurrency: 5
+ migrations: true
+ scanner:
+ repo:
+ rhel-repository-scanner: <2>
+ repo2cpe_mapping_file: /data/cpe-map.json
+ package:
+ rhel_containerscanner: <3>
+ name2repos_mapping_file: /data/repo-map.json
+----
+<1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`.
+<2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information".
+<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information".
\ No newline at end of file
diff --git a/modules/clair-openshift-airgap-database.adoc b/modules/clair-openshift-airgap-database.adoc
index 282f92f1d..c0ca82b1e 100644
--- a/modules/clair-openshift-airgap-database.adoc
+++ b/modules/clair-openshift-airgap-database.adoc
@@ -1,44 +1,63 @@
-[[clair-openshift-airgap-database]]
-= Configuring access to the Clair database in the air-gapped OpenShift cluster
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-* Use `kubectl` to determine the Clair database service:
+:_content-type: PROCEDURE
+[id="clair-openshift-airgap-database"]
+= Configuring access to the Clair database in the disconnected {ocp} cluster
+
+Use the following procedure to configure access to the Clair database in your disconnected {ocp} cluster.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file.
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file.
+* You have exported the updaters bundle from a Clair instance that has access to the internet.
+
+.Procedure
+
+. Determine your Clair database service by using the `oc` CLI tool, for example:
++
+[source,terminal]
----
-$ kubectl get svc -n quay-enterprise
-
+$ oc get svc -n quay-enterprise
+----
++
+.Example output
++
+[source,terminal]
+----
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
example-registry-clair-app ClusterIP 172.30.224.93 80/TCP,8089/TCP 4d21h
example-registry-clair-postgres ClusterIP 172.30.246.88 5432/TCP 4d21h
...
----
-* Forward the Clair database port so that it is accessible from the local machine, for example:
+. Forward the Clair database port so that it is accessible from the local machine. For example:
++
+[source,terminal]
----
-$ kubectl port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432
+$ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432
----
-* Update the Clair configuration file:
+. Update your Clair `config.yaml` file, for example:
++
-.clair-config.yaml
[source,yaml]
----
indexer:
- connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable
+ connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1>
scanlock_retry: 10
layer_scan_concurrency: 5
migrations: true
scanner:
repo:
- rhel-repository-scanner:
+ rhel-repository-scanner: <2>
repo2cpe_mapping_file: /data/cpe-map.json
package:
- rhel_containerscanner:
+ rhel_containerscanner: <3>
name2repos_mapping_file: /data/repo-map.json
----
-+
-[NOTE]
-====
-* Replace the value of the `host` in the multiple `connstring` fields with `localhost`.
-* As an alternative to using `kubectl port-forward`, you can use `kubefwd` instead. With this method, there is no need to modify the `connstring` field in the Clair configuration file to use `localhost`.
-====
+<1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`.
+<2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information".
+<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information".
diff --git a/modules/clair-openshift-airgap-import-bundle-standalone.adoc b/modules/clair-openshift-airgap-import-bundle-standalone.adoc
new file mode 100644
index 000000000..577e85622
--- /dev/null
+++ b/modules/clair-openshift-airgap-import-bundle-standalone.adoc
@@ -0,0 +1,26 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-openshift-airgap-import-bundle-standalone"]
+= Importing the updaters bundle into the disconnected {ocp} cluster
+
+Use the following procedure to import the updaters bundle into your disconnected {ocp} cluster.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+* You have deployed Clair.
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file.
+* You have exported the updaters bundle from a Clair instance that has access to the internet.
+* You have transferred the updaters bundle into your disconnected environment.
+
+.Procedure
+
+* Use the `clairctl` CLI tool to import the updaters bundle into the Clair database that is deployed by {ocp}:
++
+[source,terminal]
+----
+$ ./clairctl --config ./clair-config.yaml import-updaters updates.gz
+----
diff --git a/modules/clair-openshift-airgap-import-bundle.adoc b/modules/clair-openshift-airgap-import-bundle.adoc
index 8a4ab5807..c4f28b537 100644
--- a/modules/clair-openshift-airgap-import-bundle.adoc
+++ b/modules/clair-openshift-airgap-import-bundle.adoc
@@ -1,8 +1,27 @@
-[[clair-openshift-airgap-import-bundle]]
-= Importing the updaters bundle into the air-gapped environment
-After transferring the updaters bundle to the air-gapped environment, use `clairctl` to import the bundle into the Clair database deployed by the OpenShift Operator:
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+:_content-type: PROCEDURE
+[id="clair-openshift-airgap-import-bundle"]
+= Importing the updaters bundle into the disconnected {ocp} cluster
+
+Use the following procedure to import the updaters bundle into your disconnected {ocp} cluster.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file.
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file.
+* You have exported the updaters bundle from a Clair instance that has access to the internet.
+* You have transferred the updaters bundle into your disconnected environment.
+
+.Procedure
+
+* Use the `clairctl` CLI tool to import the updaters bundle into the Clair database that is deployed by {ocp}. For example:
++
+[source,terminal]
----
$ ./clairctl --config ./clair-config.yaml import-updaters updates.gz
----
diff --git a/modules/clair-openshift-config.adoc b/modules/clair-openshift-config.adoc
index 9ed25f036..4689bc893 100644
--- a/modules/clair-openshift-config.adoc
+++ b/modules/clair-openshift-config.adoc
@@ -1,43 +1,35 @@
-[[clair-openshift-config]]
-= Clair on OpenShift config
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-To retrieve the configuration file for a Clair instance deployed using the OpenShift Operator, retrieve and decode the config secret using the appropriate namespace, and save it to file, for example:
+:_content-type: PROCEDURE
+[id="clair-openshift-config"]
+= Retrieving and decoding the Clair configuration secret for Clair deployments on {ocp}
+Use the following procedure to retrieve and decode the configuration secret for a Clair instance provisioned on {ocp}.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+
+.Procedure
+
+. Enter the following command to retrieve and decode the configuration secret, and then save it to a Clair configuration YAML:
++
+[source,terminal]
----
-$ kubectl get secret -n quay-enterprise example-registry-clair-config-secret -o "jsonpath={$.data['config\.yaml']}" | base64 -d > clair-config.yaml
+$ oc get secret -n quay-enterprise example-registry-clair-config-secret -o "jsonpath={$.data['config\.yaml']}" | base64 -d > clair-config.yaml
----
-An excerpt from a Clair configuration file is shown below:
-
-.clair-config.yaml
+. Update the `clair-config.yaml` file so that the `disable_updaters` and `airgap` parameters are set to `true`, for example:
++
[source,yaml]
----
-http_listen_addr: :8080
-introspection_addr: ""
-log_level: info
+---
indexer:
- connstring: host=example-registry-clair-postgres port=5432 dbname=postgres user=postgres password=postgres sslmode=disable
- scanlock_retry: 10
- layer_scan_concurrency: 5
- migrations: true
- scanner:
- package: {}
- dist: {}
- repo: {}
- airgap: false
+ airgap: true
+---
matcher:
- connstring: host=example-registry-clair-postgres port=5432 dbname=postgres user=postgres password=postgres sslmode=disable
- max_conn_pool: 100
- indexer_addr: ""
- migrations: true
- period: null
- disable_updaters: false
-notifier:
- connstring: host=example-registry-clair-postgres port=5432 dbname=postgres user=postgres password=postgres sslmode=disable
- migrations: true
- indexer_addr: ""
- matcher_addr: ""
- poll_interval: 5m
- delivery_interval: 1m
- ...
-----
+ disable_updaters: true
+---
+----
\ No newline at end of file
diff --git a/modules/clair-openshift-manual.adoc b/modules/clair-openshift-manual.adoc
index 11102cf02..4610a5e48 100644
--- a/modules/clair-openshift-manual.adoc
+++ b/modules/clair-openshift-manual.adoc
@@ -1,19 +1,28 @@
-[[clair-openshift-manual]]
-= Manually Deploying Clair
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-To configure Clair V4 on an existing {productname} OpenShift deployment running Clair V2, first ensure {productname} has been upgraded to at least version 3.4.0. Then use the following steps to manually set up Clair V4 alongside Clair V2.
+:_content-type: PROCEDURE
+[id="manually-deploy-clair-ocp"]
+= Setting up Clair on {productname} Operator deployment
-. Set your current project to the name of the project in which {productname} is running.
-For example:
+Use the following procedure to configure Clair on a {productname} {ocp} deployment.
+
+.Prerequisites
+
+* Your {productname} Operator has been upgraded to 3.4.0 or greater.
+
+.Procedure
+
+. Enter the following command to set your current project to the name of the project that is running {productname}:
++
-```
+[source,terminal]
+----
$ oc project quay-enterprise
-```
+----
-. Create a Postgres deployment file for Clair v4 (for example, `clairv4-postgres.yaml`)
-as follows.
+. Create a Postgres deployment file for Clair, for example, `clairv4-postgres.yaml`:
++
-.clairv4-postgres.yaml
[source,yaml]
----
---
@@ -70,6 +79,7 @@ spec:
requests:
storage: "5Gi"
volumeName: "clairv4-postgres"
+ storageClassName: <1>
---
apiVersion: v1
kind: Service
@@ -87,20 +97,21 @@ spec:
selector:
quay-component: clairv4-postgres
----
+<1> If left unspecified, defaults to `quay-storageclass`.
-. Deploy the postgres database as follows:
+. Enter the following command to deploy the Postgres database:
++
-```
+[source,terminal]
+----
$ oc create -f ./clairv4-postgres.yaml
-```
+----
-. Create a Clair `config.yaml` file to use for Clair v4. For example:
+. Create a `config.yaml` file for Clair, for example:
++
-.config.yaml
[source,yaml]
----
introspection_addr: :8089
-http_listen_addr: :8080
+http_listen_addr: :8081
log_level: debug
indexer:
connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable
@@ -133,18 +144,17 @@ metrics:
name: "prometheus"
----
<1> To generate a Clair pre-shared key (PSK), enable `scanning` in the Security Scanner section of the User Interface and click `Generate PSK`.
-
++
More information about Clair's configuration format can be found in link:https://quay.github.io/clair/reference/config.html[upstream Clair documentation].
-. Create a secret from the Clair `config.yaml`:
+. Enter the following command to create a secret from the Clair `config.yaml` file:
++
-```
+----
$ oc create secret generic clairv4-config-secret --from-file=./config.yaml
-```
+----
-. Create the Clair v4 deployment file (for example, `clair-combo.yaml`) and modify it as necessary:
+. Create a deployment file for Clair, for example, `clair-combo.yaml`:
++
-.clair-combo.yaml
[source,yaml,subs="verbatim,attributes"]
----
---
@@ -211,37 +221,45 @@ spec:
quay-component: clair-combo
type: ClusterIP
----
-<1> Change image to latest clair image name and version.
-<2> With the Service set to clairv4, the scanner endpoint for Clair v4
-is entered later into the {productname} config.yaml in the
-`SECURITY_SCANNER_V4_ENDPOINT` as `\http://clairv4`.
+<1> Use the latest Clair image name and version.
+<2> With the `Service` set to `clairv4`, the scanner endpoint for Clair v4 is entered into the {productname} `config.yaml` file in the `SECURITY_SCANNER_V4_ENDPOINT` as `\http://clairv4`.
-. Create the Clair v4 deployment as follows:
+. Enter the following command to create the Clair deployment:
++
-```
+----
$ oc create -f ./clair-combo.yaml
-```
+----
-. Modify the `config.yaml` file for your {productname} deployment to add the following
-entries at the end:
+. Add the following entries to your `config.yaml` file for your {productname} deployment:
++
[source,yaml]
----
FEATURE_SECURITY_NOTIFICATIONS: true
FEATURE_SECURITY_SCANNER: true
-SECURITY_SCANNER_V4_ENDPOINT: http://clairv4 <1>
+SECURITY_SCANNER_V4_ENDPOINT: <1>
+SECURITY_SCANNER_V4_PSK: <2>
----
-<1> Identify the Clair v4 service endpoint
+<1> Obtained through the {productname} configuration tool. This parameter must be manually added if you do not use the {productname} configuration tool.
+<2> Obtained through the {productname} configuration tool. This parameter must be manually added if you do not use the {productname} configuration tool.
-. Redeploy the modified `config.yaml` to the secret containing that file
-(for example, `quay-enterprise-config-secret`:
+. Enter the following command to delete the original configuration secret for your `quay-enterprise` project:
++
-```
+[source,terminal]
+----
$ oc delete secret quay-enterprise-config-secret
-$ oc create secret generic quay-enterprise-config-secret --from-file=./config.yaml
-```
+----
-. For the new `config.yaml` to take effect, you need to restart the {productname} pods. Simply deleting the `quay-app` pods causes pods with the updated configuration to be deployed.
+. Deploy the modified `config.yaml` to the secret containing that file:
++
+[source,terminal]
+----
+$ oc create secret generic quay-enterprise-config-secret --from-file=./config.yaml
+----
-At this point, images in any of the organizations identified in the namespace whitelist will be scanned by Clair v4.
+. Restart your {productname} pods.
++
+[NOTE]
+====
+Deleting the `quay-app` pods causes pods with the updated configuration to be deployed.
+====
\ No newline at end of file
diff --git a/modules/clair-openshift.adoc b/modules/clair-openshift.adoc
index f113bfae7..cd4cb3f17 100644
--- a/modules/clair-openshift.adoc
+++ b/modules/clair-openshift.adoc
@@ -1,5 +1,9 @@
-[[clair-openshift]]
-= Setting Up Clair on a {productname} OpenShift deployment
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-== Deploying Via the Quay Operator
-To set up Clair V4 on a new {productname} deployment on OpenShift, it is highly recommended to use the Quay Operator. By default, the Quay Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair security scanning automatically.
+:_content-type: CONCEPT
+[id="clair-quay-operator-overview"]
+= Clair on {ocp}
+
+To set up Clair v4 (Clair) on a {productname} deployment on {ocp}, it is recommended to use the {productname} Operator. By default, the {productname} Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically.
diff --git a/modules/clair-standalone-config-location.adoc b/modules/clair-standalone-config-location.adoc
index 4e6b9b8ea..18dc6e628 100644
--- a/modules/clair-standalone-config-location.adoc
+++ b/modules/clair-standalone-config-location.adoc
@@ -1,13 +1,47 @@
-[[clair-standalone-config-location]]
-= Standalone Clair config
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-For standalone Clair deployments, the config file is the one specified in CLAIR_CONF environment variable in the `podman run` command, for example:
+:_content-type: PROCEDURE
+[id="clair-standalone-config-location"]
+= Deploying a self-managed Clair container for disconnected {ocp} clusters
+Use the following procedure to deploy a self-managed Clair container for disconnected {ocp} clusters.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+
+.Procedure
+
+. Create a folder for your Clair configuration file, for example:
++
+[source,terminal]
+----
+$ mkdir /etc/clairv4/config/
+----
+
+. Create a Clair configuration file with the `disable_updaters` and `airgap` parameters set to `true`, for example:
++
+[source,yaml]
+----
+---
+indexer:
+ airgap: true
+---
+matcher:
+ disable_updaters: true
+---
+----
+
+. Start Clair by using the container image, mounting in the configuration from the file you created:
++
[subs="verbatim,attributes"]
-....
-sudo podman run -d --rm --name clairv4 \
- -p 8081:8081 -p 8089:8089 \
- -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=combo \
- -v /etc/clairv4/config:/clair:Z \
- {productrepo}/{clairimage}:{productminv}
-....
+----
+$ sudo podman run -it --rm --name clairv4 \
+-p 8081:8081 -p 8088:8088 \
+-e CLAIR_CONF=/clair/config.yaml \
+-e CLAIR_MODE=combo \
+-v /etc/clairv4/config:/clair:Z \
+{productrepo}/{clairimage}:{productminv}
+----
diff --git a/modules/clair-standalone-configure.adoc b/modules/clair-standalone-configure.adoc
new file mode 100644
index 000000000..0259e49c0
--- /dev/null
+++ b/modules/clair-standalone-configure.adoc
@@ -0,0 +1,131 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-standalone-configure"]
+= Setting up Clair on standalone {productname} deployments
+
+For standalone {productname} deployments, you can set up Clair manually.
+
+.Procedure
+
+. In your {productname} installation directory, create a new directory for the Clair database data:
++
+[source,terminal]
+----
+$ mkdir /home/<user>/quay-poc/postgres-clairv4
+----
+
+. Set the appropriate permissions for the `postgres-clairv4` file by entering the following command:
++
+[source,terminal]
+----
+$ setfacl -m u:26:-wx /home/<user>/quay-poc/postgres-clairv4
+----
+
+. Deploy a Clair Postgres database by entering the following command:
++
+[source,terminal]
+----
+$ sudo podman run -d --name postgresql-clairv4 \
+ -e POSTGRESQL_USER=clairuser \
+ -e POSTGRESQL_PASSWORD=clairpass \
+ -e POSTGRESQL_DATABASE=clair \
+ -e POSTGRESQL_ADMIN_PASSWORD=adminpass \
+ -p 5433:5433 \
+  -v /home/<user>/quay-poc/postgres-clairv4:/var/lib/pgsql/data:Z \
+ registry.redhat.io/rhel8/postgresql-10:1
+----
+
+. Install the Postgres `uuid-ossp` module for your Clair deployment:
++
+[source,terminal]
+----
+$ podman exec -it postgresql-clairv4 /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"" | psql -d clair -U postgres'
+----
++
+.Example output
+[source,terminal]
+----
+CREATE EXTENSION
+----
++
+[NOTE]
+====
+Clair requires the `uuid-ossp` extension to be added to its Postgres database. For users with proper privileges, the extension is added automatically by Clair. If users do not have the proper privileges, the extension must be added before starting Clair.
+
+If the extension is not present, the following error will be displayed when Clair attempts to start: `ERROR: Please load the "uuid-ossp" extension. (SQLSTATE 42501)`.
+====
+
+. Create a folder for your Clair configuration file, for example:
++
+[source,terminal]
+----
+$ mkdir /etc/opt/clairv4/config/
+----
+
+. Change into the Clair configuration folder:
++
+[source,terminal]
+----
+$ cd /etc/opt/clairv4/config/
+----
+
+. Create a Clair configuration file, for example:
++
+[source,yaml]
+----
+http_listen_addr: :8081
+introspection_addr: :8088
+log_level: debug
+indexer:
+ connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable
+ scanlock_retry: 10
+ layer_scan_concurrency: 5
+ migrations: true
+matcher:
+ connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable
+ max_conn_pool: 100
+ run: ""
+ migrations: true
+ indexer_addr: clair-indexer
+notifier:
+ connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable
+ delivery_interval: 1m
+ poll_interval: 5m
+ migrations: true
+auth:
+ psk:
+ key: "MTU5YzA4Y2ZkNzJoMQ=="
+ iss: ["quay"]
+# tracing and metrics
+trace:
+ name: "jaeger"
+ probability: 1
+ jaeger:
+ agent_endpoint: "localhost:6831"
+ service_name: "clair"
+metrics:
+ name: "prometheus"
+----
++
+For more information about Clair's configuration format, see link:https://quay.github.io/clair/reference/config.html[Clair configuration reference].
+
+. Start Clair by using the container image, mounting in the configuration from the file you created:
++
+[subs="verbatim,attributes"]
+----
+$ sudo podman run -d --name clairv4 \
+-p 8081:8081 -p 8088:8088 \
+-e CLAIR_CONF=/clair/config.yaml \
+-e CLAIR_MODE=combo \
+-v /etc/opt/clairv4/config:/clair:Z \
+{productrepo}/{clairimage}:{productminv}
+----
++
+[NOTE]
+====
+Running multiple Clair containers is also possible, but for deployment scenarios beyond a single container the use of a container orchestrator like Kubernetes or {ocp} is strongly recommended.
+====
+
diff --git a/modules/clair-standalone-using.adoc b/modules/clair-standalone-using.adoc
index 6d39b5b44..f3705ffc8 100644
--- a/modules/clair-standalone-using.adoc
+++ b/modules/clair-standalone-using.adoc
@@ -21,8 +21,8 @@ $ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/ubuntu:2
The results from the security scanning can be seen in the Quay UI, as shown in the following images:
-.Scanning summary
+.Report summary
image:poc-clair-1.png[Scanning summary]
-.Scanning details
+.Report details
image:poc-clair-2.png[Scanning details]
\ No newline at end of file
diff --git a/modules/clair-standalone.adoc b/modules/clair-standalone.adoc
deleted file mode 100644
index 12aa4288a..000000000
--- a/modules/clair-standalone.adoc
+++ /dev/null
@@ -1,60 +0,0 @@
-[[clair-standalone]]
-= Setting up Clair on a non-OpenShift {productname} deployment
-
-For {productname} deployments not running on OpenShift, it is possible to configure Clair security scanning manually. {productname} deployments already running Clair V2 can use the instructions below to add Clair V4 to their deployment.
-
-. Deploy a (preferably fault-tolerant) Postgres database server. Note that Clair requires the `uuid-ossp` extension to be added to its Postgres database. If the user supplied in Clair's `config.yaml` has the necessary privileges to create the extension then it will be added automatically by Clair itself. If not, then the extension must be added before starting Clair. If the extension is not present, the following error will be displayed when Clair attempts to start.
-+
-```
-ERROR: Please load the "uuid-ossp" extension. (SQLSTATE 42501)
-```
-+
-. Create a Clair config file in a specific folder, for example, `/etc/clairv4/config/config.yaml`).
-+
-.config.yaml
-[source,yaml]
-----
-introspection_addr: :8089
-http_listen_addr: :8080
-log_level: debug
-indexer:
- connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable
- scanlock_retry: 10
- layer_scan_concurrency: 5
- migrations: true
-matcher:
- connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable
- max_conn_pool: 100
- run: ""
- migrations: true
- indexer_addr: clair-indexer
-notifier:
- connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable
- delivery_interval: 1m
- poll_interval: 5m
- migrations: true
-
-# tracing and metrics
-trace:
- name: "jaeger"
- probability: 1
- jaeger:
- agent_endpoint: "localhost:6831"
- service_name: "clair"
-metrics:
- name: "prometheus"
-----
-
-More information about Clair's configuration format can be found in link:https://quay.github.io/clair/reference/config.html[upstream Clair documentation].
-
-. Run Clair via the container image, mounting in the configuration from the file you created.
-+
-[subs="verbatim,attributes"]
-```
-$ podman run -p 8080:8080 -p 8089:8089 -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=combo -v /etc/clair4/config:/clair -d {productrepo}/{clairimage}:{productminv}
-```
-
-. Follow the remaining instructions from the previous section for configuring {productname} to use the new Clair V4 endpoint.
-
-Running multiple Clair containers in this fashion is also possible, but for deployment scenarios beyond a single container the use of a container orchestrator like Kubernetes or OpenShift is strongly recommended.
-
diff --git a/modules/clair-testing.adoc b/modules/clair-testing.adoc
new file mode 100644
index 000000000..98fb0b854
--- /dev/null
+++ b/modules/clair-testing.adoc
@@ -0,0 +1,50 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-testing"]
+= Testing Clair
+
+Use the following procedure to test Clair on either a standalone {productname} deployment, or on an {ocp} Operator-based deployment.
+
+.Prerequisites
+
+* You have deployed the Clair container image.
+
+.Procedure
+
+. Pull a sample image by entering the following command:
++
+[source,terminal]
+----
+$ podman pull ubuntu:20.04
+----
+
+. Tag the image to your registry by entering the following command:
++
+[source,terminal]
+----
+$ sudo podman tag docker.io/library/ubuntu:20.04 quay-server.example.com/quayadmin/ubuntu:20.04
+----
+
+. Push the image to your {productname} registry by entering the following command:
++
+[source,terminal]
+----
+$ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/ubuntu:20.04
+----
+
+. Log in to your {productname} deployment through the UI.
+
+. Click the repository name, for example, *quayadmin/ubuntu*.
+
+. In the navigation pane, click *Tags*.
++
+.Report summary
+image:clair-reposcan.png[Security scan information appears for scanned repository images]
+
+. Click the image report, for example, *45 medium*, to show a more detailed report:
++
+.Report details
+image:clair-vulnerabilities.png[See all vulnerabilities or only those that are fixable]
\ No newline at end of file
diff --git a/modules/clair-unmanaged.adoc b/modules/clair-unmanaged.adoc
index 208c73935..dadda3417 100644
--- a/modules/clair-unmanaged.adoc
+++ b/modules/clair-unmanaged.adoc
@@ -1,113 +1,11 @@
-[[clair-unmanaged]]
-= Advanced Clair configuration
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-== Unmanaged Clair configuration
+:_content-type: CONCEPT
+[id="unmanaged-clair-configuration"]
+= Unmanaged Clair configuration
-With {productname} 3.7, users can run an unmanaged Clair configuration on the {productname} OpenShift Container Platform Operator. This feature allows users to create an unmanaged Clair database, or run their custom Clair configuration without an unmanaged database.
+{productname} users can run an unmanaged Clair configuration with the {productname} {ocp} Operator. This feature allows users to create an unmanaged Clair database, or run their custom Clair configuration without an unmanaged database.
-=== Unmanaging a Clair database
-
-An unmanaged Clair database allows the {productname} Operator to work in a link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/georepl-intro[geo-replicated environment], where multiple instances of the Operator must communicate with the same database. An unmanaged Clair database can also be used when a user requires a highly-available (HA) Clair database that exists outside of a cluster.
-
-.Procedure
-
-* In the Quay Operator, set the `clairpostgres` component of the QuayRegistry custom resource to unmanaged:
-+
-[source,yaml]
-----
-apiVersion: quay.redhat.com/v1
-kind: QuayRegistry
-metadata:
- name: quay370
-spec:
- configBundleSecret: config-bundle-secret
- components:
- - kind: objectstorage
- managed: false
- - kind: route
- managed: true
- - kind: tls
- managed: false
- - kind: clairpostgres
- managed: false
-----
-
-=== Configuring a custom Clair database
-
-The {productname} Operator for OpenShift Container Platform allows users to provide their own Clair configuration by editing the `configBundleSecret` parameter.
-
-.Procedure
-
-. Create a Quay config bundle secret that includes the `clair-config.yaml`:
-+
-[source,terminal]
-----
-$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret
-----
-+
-Example `clair-config.yaml` configuration:
-+
-[source,yaml]
-----
-indexer:
- connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca
- layer_scan_concurrency: 6
- migrations: true
- scanlock_retry: 11
-log_level: debug
-matcher:
- connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca
- migrations: true
-metrics:
- name: prometheus
-notifier:
- connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca
- migrations: true
-----
-+
-[NOTE]
-====
-* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`.
-* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config].
-====
-
-. Add the `clair-config.yaml` to your bundle secret, named `configBundleSecret`:
-+
-[source,yaml]
-----
-apiVersion: v1
-kind: Secret
-metadata:
- name: config-bundle-secret
- namespace: quay-enterprise
-data:
- config.yaml:
- clair-config.yaml:
- extra_ca_cert_:
- clair-ssl.crt: >-
- clair-ssl.key: >-
-----
-+
-[NOTE]
-====
-When updated, the provided `clair-config.yaml` is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module.
-====
-
-After proper configuration, the Clair application pod should return to a `Ready` state.
-
-== Running a custom Clair configuration with a `managed` database
-
-In some cases, users might want to run a custom Clair configuration with a `managed` database. This is useful in the following scenarios:
-
-* When a user wants to disable an updater.
-* When a user is running in an air-gapped environment.
-+
-[NOTE]
-====
-* If you are running Quay in an air-gapped environment, the `airgap` parameter of your `clair-config.yaml` must be set to `true`.
-* If you are running Quay in an air-gapped environment, you should disable all updaters.
-====
-
-Use the steps in "Configuring a custom Clair database" to configure your database when `clairpostgres` is set to `managed`.
-
-For more information about running Clair in an air-gapped environment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#clair-openshift-airgap-database[Configuring access to the Clair database in the air-gapped OpenShift cluster].
+An unmanaged Clair database allows the {productname} Operator to work in a geo-replicated environment, where multiple instances of the Operator must communicate with the same database. An unmanaged Clair database can also be used when a user requires a highly-available (HA) Clair database that exists outside of a cluster.
\ No newline at end of file
diff --git a/modules/clair-updater-urls.adoc b/modules/clair-updater-urls.adoc
index 9e0a99a9a..b7e30e463 100644
--- a/modules/clair-updater-urls.adoc
+++ b/modules/clair-updater-urls.adoc
@@ -1,7 +1,12 @@
-[[clair-updater-urls]]
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-updater-urls"]
= Clair updater URLs
-The following are the HTTP hosts and paths that Clair will attempt to talk to in a default configuration. This list is non-exhaustive, as some servers will issue redirects and some request URLs are constructed dynamically.
+The following are the HTTP hosts and paths that Clair will attempt to talk to in a default configuration. This list is non-exhaustive. Some servers issue redirects and some request URLs are constructed dynamically.
* \https://secdb.alpinelinux.org/
* \http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list
diff --git a/modules/clair-updaters.adoc b/modules/clair-updaters.adoc
new file mode 100644
index 000000000..ba0daf8f4
--- /dev/null
+++ b/modules/clair-updaters.adoc
@@ -0,0 +1,95 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-updaters"]
+= Clair updaters
+
+Clair uses `Go` packages called _updaters_ that contain the logic of fetching and parsing different vulnerability databases.
+
+Updaters are usually paired with a matcher to interpret if, and how, any vulnerability is related to a package. Administrators might want to update the vulnerability database less frequently, or not import vulnerabilities from databases that they know will not be used.
+
+[id="configuring-updaters"]
+== Configuring updaters
+
+Updaters can be configured by the `updaters` key at the top of the configuration. If updaters are being run automatically within the matcher process, which is the default setting, the period for running updaters is configured under the matcher's configuration field.
+
+[id="updater-sets"]
+=== Updater sets
+
+The following sets can be configured with Clair updaters:
+
+* `alpine`
+* `aws`
+* `debian`
+* `enricher/cvss`
+* `libvuln/driver`
+* `oracle`
+* `photon`
+* `pyupio`
+* `rhel`
+* `rhel/rhcc`
+* `suse`
+* `ubuntu`
+* `updater`
+
+[id="selecting-updater-sets"]
+=== Selecting updater sets
+
+Specific sets of updaters can be selected by the `sets` list. For example:
+
+[source,yaml]
+----
+updaters:
+ sets:
+ - rhel
+----
+
+If the `sets` field is not populated, it defaults to using all sets.
+
+[id="filtering-updater-sets"]
+=== Filtering updater sets
+
+To reject an updater from running without disabling an entire set, the `filter` option can be used.
+
+In the following example, the string is interpreted as a Go `regexp` package. This rejects any updater with a name that does not match.
+
+[NOTE]
+====
+This means that an empty string matches any string. It does not mean that it matches no strings.
+====
+
+[source,yaml]
+----
+updaters:
+ filter: '^$'
+----
+
+[id="configuring-specific-updaters"]
+=== Configuring specific updaters
+
+Configuration for specific updaters can be passed by putting a key underneath the `config` parameter of the `updaters` object. The name of an updater might be constructed dynamically, and users should examine logs to ensure updater names are accurate. The specific object that an updater expects should be covered in the updater's documentation.
+
+In the following example, the `rhel` updater fetches a manifest from a different location:
+
+[source,yaml]
+----
+updaters:
+ config:
+ rhel:
+ url: https://example.com/mirror/oval/PULP_MANIFEST
+----
+
+[id="disabling-clair-updater-component-managed-db"]
+=== Disabling the Clair Updater component
+
+In some scenarios, users might want to disable the Clair updater component. Disabling updaters is required when running {productname} in a disconnected environment.
+
+In the following example, Clair updaters are disabled:
+
+[source,yaml]
+----
+matcher:
+ disable_updaters: true
+----
\ No newline at end of file
diff --git a/modules/clair-using.adoc b/modules/clair-using.adoc
index 47cfa9069..334c32450 100644
--- a/modules/clair-using.adoc
+++ b/modules/clair-using.adoc
@@ -1,6 +1,15 @@
-[[clair-using]]
+:_content-type: PROCEDURE
+[id="clair-using"]
= Using Clair
+Use the following procedure to ensure that Clair is working on your {productname} Operator deployment.
+
+.Prerequisites
+
+* You have configured Clair for your {ocp} deployment.
+
+.Procedure
+
. Log in to your {productname} cluster and select an organization for which you have configured
Clair scanning.
diff --git a/modules/clair-vulnerability-scanner-hosts.adoc b/modules/clair-vulnerability-scanner-hosts.adoc
new file mode 100644
index 000000000..4857eb947
--- /dev/null
+++ b/modules/clair-vulnerability-scanner-hosts.adoc
@@ -0,0 +1,22 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-vulnerability-scanner-hosts"]
+= Clair vulnerability databases
+
+Clair uses the following vulnerability databases to report for issues in your images:
+
+* Ubuntu Oval database
+* Debian Oval database
+* {rhel} Oval database
+* SUSE Oval database
+* Oracle Oval database
+* Alpine SecDB database
+* VMWare Photon OS database
+* Amazon Web Services (AWS) UpdateInfo
+* Pyup.io (Python) database
+
+For information about how Clair does security mapping with the different databases, see
+link:https://quay.github.io/claircore/concepts/severity_mapping.html[ClairCore Severity Mapping].
\ No newline at end of file
diff --git a/modules/clair-vulnerability-scanner-overview.adoc b/modules/clair-vulnerability-scanner-overview.adoc
new file mode 100644
index 000000000..724b4be6d
--- /dev/null
+++ b/modules/clair-vulnerability-scanner-overview.adoc
@@ -0,0 +1,19 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-vulnerability-scanner"]
+= Clair for {productname}
+
+Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments.
+
+[NOTE]
+====
+ifeval::["{productname}" == "Red Hat Quay"]
+With the release of {productname} 3.4, Clair v4 (image {productrepo}/{clairimage}) fully replaced Clair v2 (image quay.io/redhat/clair-jwt). See below for how to run Clair v2 in read-only mode while Clair v4 is updating.
+endif::[]
+ifeval::["{productname}" == "Project Quay"]
+With the release of Clair v4 (image clair), the previously used Clair v2 (image clair-jwt) is no longer used. See below for how to run Clair v2 in read-only mode while Clair v4 is updating.
+endif::[]
+====
diff --git a/modules/clairv4-air-gapped.adoc b/modules/clairv4-air-gapped.adoc
index bc01c289f..fe0b14c5c 100644
--- a/modules/clairv4-air-gapped.adoc
+++ b/modules/clairv4-air-gapped.adoc
@@ -1,10 +1,10 @@
[[clairv4-air-gapped]]
= Air-gapped Clair v4
-{productname} 3.4 and later and Clair v4 are supported in disconnected environments. By default, Clair v4 will attempt to run automated updates against Red Hat servers. When Clair v4 in network environments is disconnected from the internet:
+{productname} 3.4 and later and Clair v4 are supported in disconnected environments. By default, Clair v4 will attempt to run automated updates against Red Hat servers. When Clair v4 in network environments is disconnected from the internet:
-* The Clair v4 auto-update is disabled in the Clair `config` bundle.
-* On a system with internet access, the vulnerability database updates is performed manually and exported to a disk.
-* The on-disk data is then transferred to the target system with offline media. It is then manually imported.
+* The Clair v4 auto-update is disabled in the Clair `config` bundle.
+* On a system with internet access, the vulnerability database updates are performed manually and exported to a disk.
+* The on-disk data is then transferred to the target system with offline media. It is then manually imported.
-For more information on air-gapped Clair v4 and using `clairctl`, the command line tool, see https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-airgap-update[Manually updating the vulnerability databases for Clair in an air-gapped OpenShift cluster]
+For more information on air-gapped Clair v4 and using `clairctl`, the command line tool, see https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-disconnected-environments[Manually updating the vulnerability databases for Clair in an air-gapped OpenShift cluster]
diff --git a/modules/configuring-clair-disconnected-environment.adoc b/modules/configuring-clair-disconnected-environment.adoc
new file mode 100644
index 000000000..314beeffb
--- /dev/null
+++ b/modules/configuring-clair-disconnected-environment.adoc
@@ -0,0 +1,39 @@
+:_content-type: PROCEDURE
+[id="configuring-clair-disconnected-environment"]
+= Configuring Clair for disconnected environments
+
+Use the following procedure to configure Clair for a disconnected environment.
+
+.Prerequisites
+
+* You have installed the `clairctl` tool to be run as a binary, or by the Clair container image.
+
+.Procedure
+
+. In your `config.yaml` file, set your Clair configuration to disable updaters from running:
++
+.config.yaml
+[source,yaml]
+----
+matcher:
+ disable_updaters: true
+----
+
+. Export the latest updater data to a local archive. The following command assumes that your Clair configuration is in `/etc/clairv4/config/config.yaml`
++
+[subs="verbatim,attributes"]
+----
+$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml export-updaters /updaters/updaters.gz
+----
++
+[NOTE]
+====
+You must explicitly reference the Clair configuration. This creates the updater archive in `/path/to/output/directory/updaters.gz` on the host. To ensure that the archive was created without any errors from the source databases, you can use the `--strict` flag with `clairctl`. The archive file should be copied over to a volume that is accessible from the disconnected host running Clair.
+====
+
+. From the disconnected host, use the following command to import the archive into Clair:
++
+[subs="verbatim,attributes"]
+----
+$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml import-updaters /updaters/updaters.gz
+----
diff --git a/modules/configuring-custom-clair-database-managed.adoc b/modules/configuring-custom-clair-database-managed.adoc
new file mode 100644
index 000000000..28bb1d11e
--- /dev/null
+++ b/modules/configuring-custom-clair-database-managed.adoc
@@ -0,0 +1,77 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="configuring-custom-clair-database-managed"]
+= Configuring a custom Clair database with a managed Clair configuration
+
+The {productname} Operator for {ocp} allows users to provide their own Clair database.
+
+Use the following procedure to create a custom Clair database.
+
+.Procedure
+
+. Create a Quay configuration bundle secret that includes the `clair-config.yaml` by entering the following command:
++
+[source,terminal]
+----
+$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config.yaml config-bundle-secret
+----
++
+.Example Clair `clair-config.yaml` file
++
+[source,yaml]
+----
+indexer:
+ connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable
+ layer_scan_concurrency: 6
+ migrations: true
+ scanlock_retry: 11
+log_level: debug
+matcher:
+ connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable
+ migrations: true
+metrics:
+ name: prometheus
+notifier:
+ connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable
+ migrations: true
+----
++
+[NOTE]
+====
+* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`.
+* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config].
+====
+
+. Add the `clair-config.yaml` file to your bundle secret, for example:
++
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ name: config-bundle-secret
+ namespace: quay-enterprise
+data:
+ config.yaml:
+ clair-config.yaml:
+----
++
+[NOTE]
+====
+* When updated, the provided `clair-config.yaml` file is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module.
+====
+
+. You can check the status of your Clair pod by clicking the commit in the *Build History* page, or by running `oc get pods -n <namespace>`. For example:
++
+----
+$ oc get pods -n <namespace>
+----
++
+.Example output
+----
+NAME READY STATUS RESTARTS AGE
+f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s
+----
\ No newline at end of file
diff --git a/modules/configuring-custom-clair-database.adoc b/modules/configuring-custom-clair-database.adoc
new file mode 100644
index 000000000..8395574b5
--- /dev/null
+++ b/modules/configuring-custom-clair-database.adoc
@@ -0,0 +1,85 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="configuring-custom-clair-database"]
+= Configuring a custom Clair database with an unmanaged Clair database
+
+The {productname} Operator for {ocp} allows users to provide their own Clair database.
+
+Use the following procedure to create a custom Clair database.
+
+[NOTE]
+====
+The following procedure sets up Clair with SSL/TLS certifications. To view a similar procedure that does not set up Clair with SSL/TLS certifications, see "Configuring a custom Clair database with a managed Clair configuration".
+====
+
+.Procedure
+
+. Create a Quay configuration bundle secret that includes the `clair-config.yaml` by entering the following command:
++
+[source,terminal]
+----
+$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret
+----
++
+.Example Clair `clair-config.yaml` file
++
+[source,yaml]
+----
+indexer:
+ connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca
+ layer_scan_concurrency: 6
+ migrations: true
+ scanlock_retry: 11
+log_level: debug
+matcher:
+ connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca
+ migrations: true
+metrics:
+ name: prometheus
+notifier:
+ connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca
+ migrations: true
+----
++
+[NOTE]
+====
+* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`.
+* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config].
+====
+
+. Add the `clair-config.yaml` file to your bundle secret, for example:
++
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ name: config-bundle-secret
+ namespace: quay-enterprise
+data:
+ config.yaml:
+ clair-config.yaml:
+ extra_ca_cert_:
+ clair-ssl.crt: >-
+ clair-ssl.key: >-
+----
++
+[NOTE]
+====
+When updated, the provided `clair-config.yaml` file is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module.
+====
+
+. You can check the status of your Clair pod by clicking the commit in the *Build History* page, or by running `oc get pods -n <namespace>`. For example:
++
+----
+$ oc get pods -n <namespace>
+----
++
+.Example output
+----
+NAME READY STATUS RESTARTS AGE
+f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s
+----
\ No newline at end of file
diff --git a/modules/custom-clair-configuration-managed-database.adoc b/modules/custom-clair-configuration-managed-database.adoc
new file mode 100644
index 000000000..34f518776
--- /dev/null
+++ b/modules/custom-clair-configuration-managed-database.adoc
@@ -0,0 +1,14 @@
+:_content-type: CONCEPT
+[id="custom-clair-configuration-managed-database"]
+= Running a custom Clair configuration with a managed Clair database
+
+In some cases, users might want to run a custom Clair configuration with a managed Clair database. This is useful in the following scenarios:
+
+* When a user wants to disable specific updater resources.
+* When a user is running {productname} in a disconnected environment. For more information about running Clair in a disconnected environment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#clair-openshift-airgap-database[Configuring access to the Clair database in the air-gapped OpenShift cluster].
++
+[NOTE]
+====
+* If you are running {productname} in a disconnected environment, the `airgap` parameter of your `clair-config.yaml` must be set to `true`.
+* If you are running {productname} in a disconnected environment, you should disable all updater components.
+====
diff --git a/modules/fips-overview.adoc b/modules/fips-overview.adoc
index d121fe867..666f86408 100644
--- a/modules/fips-overview.adoc
+++ b/modules/fips-overview.adoc
@@ -1,6 +1,11 @@
-[[fips-overview]]
-= FIPS readiness and compliance
+// Module included in the following assemblies:
+//
+// clair/master.adoc
-FIPS (the Federal Information Processing Standard developed by the National Institute of Standards and Technology, NIST) is regarded as the gold standard for securing and encrypting sensitive data, particularly in heavily regulated areas such as banking, healthcare and the public sector. Red Hat Enterprise Linux and Red Hat OpenShift Container Platform support this standard by providing a FIPS mode in which the system would only allow usage of certain, FIPS-validated cryptographic modules, like `openssl`. This ensures FIPS compliance.
+:_content-type: CONCEPT
+[id="fips-overview"]
+= Federal Information Processing Standard (FIPS) readiness and compliance
-{productname} supports running on FIPS-enabled RHEL and Red Hat OpenShift Container Platform from version 3.5.
+The Federal Information Processing Standard (FIPS) developed by the National Institute of Standards and Technology (NIST) is regarded as the gold standard for securing and encrypting sensitive data, notably in highly regulated areas such as banking, healthcare, and the public sector. {rhel} and {ocp} support the FIPS standard by providing a _FIPS mode_, in which the system only allows usage of specific FIPS-validated cryptographic modules like `openssl`. This ensures FIPS compliance.
+
+{productname} supports running on FIPS-enabled RHEL and {ocp} environments from {productname} version 3.5.0.
diff --git a/modules/internal-api.adoc b/modules/internal-api.adoc
new file mode 100644
index 000000000..1af8ee816
--- /dev/null
+++ b/modules/internal-api.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="internal-api"]
+= Internal API endpoints for Clair
+
+Internal API endpoints are embedded in `/api/v1/internal` and are meant for communication between Clair microservices.
+
+[IMPORTANT]
+====
+* If your Clair `config.yaml` file is set to `CLAIR_MODE=combo`, internal API endpoints might not exist.
+* APIs are not formally exposed in Clair's OpenAPI Specification. Further information and usage of these endpoints is left to the reader.
+====
+
+[id="update-diffs"]
+== Update diffs
+
+The `update_diff` endpoint exposes the API for diffing two update operations. This parameter is used by the notifier to determine the added and removed vulnerabilities on security database updates.
+
+[id="update-operations"]
+== Update operation
+
+The `update_operation` endpoint exposes the API for viewing updaters activity. This is used by the notifier to determine if new updates have occurred, and triggers an update diff to see what has changed.
+
+[id="affected-manifest"]
+== AffectedManifest
+
+The `affected_manifest` endpoint exposes the API for retrieving affected manifests given a list of vulnerabilities. This is used by the notifier to determine the manifests that need to have a notification generated.
\ No newline at end of file
diff --git a/modules/managed-clair-database.adoc b/modules/managed-clair-database.adoc
new file mode 100644
index 000000000..c75ec006c
--- /dev/null
+++ b/modules/managed-clair-database.adoc
@@ -0,0 +1,28 @@
+:_content-type: PROCEDURE
+[id="managed-clair-database"]
+= Setting a Clair database to managed
+
+Use the following procedure to set your Clair database to managed.
+
+.Procedure
+
+* In the Quay Operator, set the `clairpostgres` component of the `QuayRegistry` custom resource to `managed: true`:
++
+[source,yaml]
+----
+apiVersion: quay.redhat.com/v1
+kind: QuayRegistry
+metadata:
+ name: quay370
+spec:
+ configBundleSecret: config-bundle-secret
+ components:
+ - kind: objectstorage
+ managed: false
+ - kind: route
+ managed: true
+ - kind: tls
+ managed: false
+ - kind: clairpostgres
+ managed: true
+----
\ No newline at end of file
diff --git a/modules/mapping-repositories-to-cpe-information.adoc b/modules/mapping-repositories-to-cpe-information.adoc
new file mode 100644
index 000000000..86695ec8d
--- /dev/null
+++ b/modules/mapping-repositories-to-cpe-information.adoc
@@ -0,0 +1,44 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="mapping-repositories-to-cpe-information"]
+= Mapping repositories to Common Product Enumeration information
+
+Clair's {rhel} scanner relies on a Common Product Enumeration (CPE) file to map RPM packages to the corresponding security data to produce matching results. These files are owned by product security and updated daily.
+
+The CPE file must be present, or access to the file must be allowed, for the scanner to properly process RPM packages. If the file is not present, RPM packages installed in the container image will not be scanned.
+
+.Clair CPE mapping files
+[options="header"]
+|===
+|CPE | Link to JSON mapping file
+| `repos2cpe` | link:https://www.redhat.com/security/data/metrics/repository-to-cpe.json[Red Hat Repository-to-CPE JSON]
+| `names2repos` | link:https://access.redhat.com/security/data/metrics/container-name-repos-map.json[Red Hat Name-to-Repos JSON].
+|===
+
+In addition to uploading CVE information to the database for disconnected Clair installations, you must also make the mapping file available locally:
+
+* For standalone {productname} and Clair deployments, the mapping file must be loaded into the Clair pod.
+
+* For {productname} Operator deployments on {ocp} and Clair deployments, you must set the Clair component to `unmanaged`. Then, Clair must be deployed manually, setting the configuration to load a local copy of the mapping file.
+
+[id="mapping-repositories-to-cpe-configuration"]
+== Mapping repositories to Common Product Enumeration example configuration
+
+Use the `repo2cpe_mapping_file` and `name2repos_mapping_file` fields in your Clair configuration to include the CPE JSON mapping files. For example:
+
+[source,yaml]
+----
+indexer:
+ scanner:
+ repo:
+ rhel-repository-scanner:
+ repo2cpe_mapping_file: /data/cpe-map.json
+ package:
+ rhel_containerscanner:
+ name2repos_mapping_file: /data/repo-map.json
+----
+
+For more information, see link:https://www.redhat.com/en/blog/how-accurately-match-oval-security-data-installed-rpms[How to accurately match OVAL security data to installed RPMs].
\ No newline at end of file
diff --git a/modules/operator-upgrade.adoc b/modules/operator-upgrade.adoc
index 8138a5f5f..80041a7fe 100644
--- a/modules/operator-upgrade.adoc
+++ b/modules/operator-upgrade.adoc
@@ -84,7 +84,7 @@ If possible, you should regenerate your TLS certificates with the correct hostna
The `GODEBUG=x509ignoreCN=0` flag enables the legacy behavior of treating the CommonName field on X.509 certificates as a host name when no SANs are present. However, this workaround is not recommended, as it will not persist across a redeployment.
==== Configuring Clair v4 when upgrading from 3.3.z or 3.4.z to 3.6 using the Quay Operator
-To set up Clair v4 on a new {productname} deployment on OpenShift, it is highly recommended to use the Quay Operator. By default, the Quay Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair security scanning automatically.
+To set up Clair v4 on a new {productname} deployment on OpenShift, it is highly recommended to use the Quay Operator. By default, the Quay Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically.
For instructions on setting up Clair v4 on OpenShift, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-openshift[Setting Up Clair on a {productname} OpenShift deployment].
diff --git a/modules/testing-clair.adoc b/modules/testing-clair.adoc
new file mode 100644
index 000000000..360ef3939
--- /dev/null
+++ b/modules/testing-clair.adoc
@@ -0,0 +1,61 @@
+:_content-type: CONCEPT
+[id="testing-clair"]
+= Testing Clair
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+////
+
+Currently, there are two methods for testing Clair independently of a {productname} subscription:
+
+* In a local development environment
+* In a distributed deployment
+
+[IMPORTANT]
+====
+Official documentation for testing Clair without a {productname} subscription is unsupported. These procedures and subsequent updates are maintained by upstream contributors and developers. For more information, see link:https://quay.github.io/clair/howto/getting_started.html[Getting Started With ClairV4].
+
+For official Clair documentation, see. . .
+====
+
+[id="testing-clair-local-development-environment"]
+== Testing Clair in a local development environment
+
+The simplest way to run Clair for test purposes is to use the local development environment. The local development environment can be used to test and develop Clair's integration with {productname}. Documentation for this procedure can be found on the open source Clair project at link:https://quay.github.io/clair/howto/testing.html[Testing ClairV4].
+
+[id="clair-modes"]
+== Testing Clair in a distributed deployment
+
+When testing Clair in a distributed deployment, Clair uses PostgreSQL for its data persistence. Clair migrations are supported. Users can point Clair to a fresh database to set it up.
+
+In a distributed deployment, users can test run Clair in the following modes:
+
+* Indexer mode. When Clair is running in indexer mode, it is responsible for receiving manifests and generating `IndexReports`. An `IndexReport` is an intermediate representation of a manifest's content and is used to discover vulnerabilities.
+
+* Matcher mode. When Clair is running in matcher mode, it is responsible for receiving `IndexReports` and generating `VulnerabilityReports`. A `VulnerabilityReport` describes the contents of a manifest and any vulnerabilities affecting it.
+
+* Notifier mode. When Clair is running in notifier mode, it is responsible for generating notifications when new vulnerabilities affecting a previously indexed manifest enter the system. The notifier will send notifications through the configured mechanisms.
+
+* Combination mode. When Clair is running in combination mode, the `indexer`, `matcher`, and `notifier` all run in a single OS process.
+
+[NOTE]
+====
+These modes are available when running Clair with a {productname} subscription.
+====
+
+For more information on testing Clair in a distributed deployment, see link:https://quay.github.io/clair/howto/getting_started.html#modes[Getting Started With ClairV4].
diff --git a/modules/unmanaging-clair-database.adoc b/modules/unmanaging-clair-database.adoc
new file mode 100644
index 000000000..99929ee33
--- /dev/null
+++ b/modules/unmanaging-clair-database.adoc
@@ -0,0 +1,32 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="unmanaging-clair-database"]
+= Running a custom Clair configuration with an unmanaged Clair database
+
+Use the following procedure to set your Clair database to unmanaged.
+
+.Procedure
+
+* In the Quay Operator, set the `clairpostgres` component of the `QuayRegistry` custom resource to `managed: false`:
++
+[source,yaml]
+----
+apiVersion: quay.redhat.com/v1
+kind: QuayRegistry
+metadata:
+  name: quay370
+spec:
+  configBundleSecret: config-bundle-secret
+  components:
+    - kind: objectstorage
+      managed: false
+    - kind: route
+      managed: true
+    - kind: tls
+      managed: false
+    - kind: clairpostgres
+      managed: false
+----
\ No newline at end of file