diff --git a/.github/workflows/preview.yml b/.github/workflows/preview.yml new file mode 100644 index 000000000..79428caae --- /dev/null +++ b/.github/workflows/preview.yml @@ -0,0 +1,105 @@ +# This is a basic workflow to help you get started with Actions + +name: ccutil-workflow + +# Controls when the workflow will run +on: + # Triggers the workflow on push or pull request events but only for the master branch + push: + branches: [ master ] + #pull_request: + # branches: [ master ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + get: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + - uses: actions/checkout@v2 + with: + ref: master + path: source + + - uses: actions/checkout@v2 + with: + ref: gh-pages + path: dest + + - name: whereami + run: | + cd source + pwd + ls + ls ${{ github.workspace }}/ + cd ${{ github.workspace }}/ + pwd + ls + + + + - uses: addnab/docker-run-action@v3 + with: + image: quay.io/rhn_support_gmcgoldr/levccutil + options: -v ${{ github.workspace }}:/work + shell: bash + run: | + cd /work/source/release_notes + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/release_notes/build/tmp/en-US/html-single/* /work/dest/master/release_notes/ + cd /work/source/deploy_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/deploy_quay/build/tmp/en-US/html-single/* /work/dest/master/deploy_quay/ + cd /work/source/deploy_quay_on_openshift_op_tng + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/deploy_quay_on_openshift_op_tng/build/tmp/en-US/html-single/* /work/dest/master/deploy_quay_on_openshift_op_tng/ + cd /work/source/deploy_quay_ha + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/deploy_quay_ha/build/tmp/en-US/html-single/* /work/dest/master/deploy_quay_ha/ + cd /work/source/config_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/config_quay/build/tmp/en-US/html-single/* /work/dest/master/config_quay/ + cd /work/source/manage_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/manage_quay/build/tmp/en-US/html-single/* /work/dest/master/manage_quay/ + cd /work/source/upgrade_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/upgrade_quay/build/tmp/en-US/html-single/* /work/dest/master/upgrade_quay/ + cd /work/source/use_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/use_quay/build/tmp/en-US/html-single/* /work/dest/master/use_quay/ + cd /work/source/api + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/api/build/tmp/en-US/html-single/* /work/dest/master/api/ + cd /work/source/architecture + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/architecture/build/tmp/en-US/html-single/* /work/dest/master/architecture/ + + - name: commit + run: | + cd dest + git status + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + #git diff + #git diff-index --quiet HEAD || (git commit -a -m 'DOCS Auto-update' --allow-empty && git push -f) + git add . 
+ git commit -m "update docs" + + + - name: Push changes + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: gh-pages + directory: dest + + + # Runs a single command using the runners shell + # docker run -ti --rm --privileged -v source:/source quay.io/rhn_support_gmcgoldr/levccutil "/bin/bash cd /source/deploy_quay; ls; ccutil compile --lang en_US --type asciidoc --main-file master.adoc; ls -al " + diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..f93e1dbd0 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "spellright.language": [], + "spellright.documentTypes": [ + "markdown", + "latex", + "plaintext" + ] +} \ No newline at end of file diff --git a/api/master.adoc b/api/master.adoc index a47e5f857..d31f59ced 100644 --- a/api/master.adoc +++ b/api/master.adoc @@ -1,14 +1,23 @@ +:_content-type: ASSEMBLY + + include::modules/attributes.adoc[] -[id='api'] +[id="api"] = {productname} API Guide -The {productname} application programming interface (API) is an OAuth 2 RESTful API that -consists of a set of endpoints -for adding, displaying, changing and deleting features for {productname}. -This guide describes those endpoints and shows command and browser-based examples -for accessing them. +The {productname} application programming interface (API) is an OAuth 2 RESTful API that consists of a set of endpoints for adding, displaying, changing and deleting features for {productname}. + +{productname} abides by the link:https://semver.org/#summary[Semantic Versioning (SemVer) specifications]. The following conditions are met with each major, minor, and patch release: + +* Major versions of {productname} might include incompatible API changes. For example, the API of {productname} 2.0 differs from {productname} 3.0. +* Minor versions of {productname}, for example, 3.y, adds functionality in a backwards compatible manner. +* Patch versions of {productname}, for example, 3.y.z, introduces backwards compatible bug fixes. + +Currently, {productname} uses the `api/v1` endpoint for 3.y.z releases. + +This guide describes the `api/v1` endpoints and the browser-based examples for accessing those endpoints. 
include::modules/proc_use-api.adoc[leveloffset=+1] @@ -23,18 +32,18 @@ include::modules/api-authorization.adoc[leveloffset=+2] include::modules/api-appspecifictokens.adoc[leveloffset=+2] -include::modules/api-appspecifictokens-listAppTokens.adoc[leveloffset=+3] include::modules/api-appspecifictokens-createAppToken.adoc[leveloffset=+3] -include::modules/api-appspecifictokens-revokeAppToken.adoc[leveloffset=+3] +include::modules/api-appspecifictokens-listAppTokens.adoc[leveloffset=+3] include::modules/api-appspecifictokens-getAppToken.adoc[leveloffset=+3] +include::modules/api-appspecifictokens-revokeAppToken.adoc[leveloffset=+3] include::modules/api-build.adoc[leveloffset=+2] include::modules/api-build-getRepoBuildStatus.adoc[leveloffset=+3] include::modules/api-build-getRepoBuildLogs.adoc[leveloffset=+3] -include::modules/api-build-cancelRepoBuild.adoc[leveloffset=+3] include::modules/api-build-getRepoBuild.adoc[leveloffset=+3] -include::modules/api-build-getRepoBuilds.adoc[leveloffset=+3] +include::modules/api-build-cancelRepoBuild.adoc[leveloffset=+3] include::modules/api-build-requestRepoBuild.adoc[leveloffset=+3] +include::modules/api-build-getRepoBuilds.adoc[leveloffset=+3] include::modules/api-discovery.adoc[leveloffset=+2] include::modules/api-discovery-discovery.adoc[leveloffset=+3] @@ -43,8 +52,8 @@ include::modules/api-error.adoc[leveloffset=+2] include::modules/api-error-getErrorDescription.adoc[leveloffset=+3] include::modules/api-globalmessages.adoc[leveloffset=+2] -include::modules/api-globalmessages-getGlobalMessages.adoc[leveloffset=+3] include::modules/api-globalmessages-createGlobalMessage.adoc[leveloffset=+3] +include::modules/api-globalmessages-getGlobalMessages.adoc[leveloffset=+3] include::modules/api-globalmessages-deleteGlobalMessage.adoc[leveloffset=+3] include::modules/api-logs.adoc[leveloffset=+2] @@ -59,33 +68,33 @@ include::modules/api-logs-exportRepoLogs.adoc[leveloffset=+3] include::modules/api-logs-listRepoLogs.adoc[leveloffset=+3] include::modules/api-manifest.adoc[leveloffset=+2] -include::modules/api-manifest-deleteManifestLabel.adoc[leveloffset=+3] include::modules/api-manifest-getManifestLabel.adoc[leveloffset=+3] -include::modules/api-manifest-listManifestLabels.adoc[leveloffset=+3] +include::modules/api-manifest-deleteManifestLabel.adoc[leveloffset=+3] include::modules/api-manifest-addManifestLabel.adoc[leveloffset=+3] +include::modules/api-manifest-listManifestLabels.adoc[leveloffset=+3] include::modules/api-manifest-getRepoManifest.adoc[leveloffset=+3] include::modules/api-mirror.adoc[leveloffset=+2] include::modules/api-mirror-syncCancel.adoc[leveloffset=+3] include::modules/api-mirror-syncNow.adoc[leveloffset=+3] include::modules/api-mirror-getRepoMirrorConfig.adoc[leveloffset=+3] -include::modules/api-mirror-createRepoMirrorConfig.adoc[leveloffset=+3] include::modules/api-mirror-changeRepoMirrorConfig.adoc[leveloffset=+3] +include::modules/api-mirror-createRepoMirrorConfig.adoc[leveloffset=+3] include::modules/api-namespacequota.adoc[leveloffset=+2] include::modules/api-namespacequota-listUserQuota.adoc[leveloffset=+3] include::modules/api-namespacequota-getOrganizationQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc[leveloffset=+3] include::modules/api-namespacequota-changeOrganizationQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-listOrganizationQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc[leveloffset=+3] 
include::modules/api-namespacequota-createOrganizationQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-listOrganizationQuotaLimit.adoc[leveloffset=+3] include::modules/api-namespacequota-getUserQuotaLimit.adoc[leveloffset=+3] include::modules/api-namespacequota-listUserQuotaLimit.adoc[leveloffset=+3] include::modules/api-namespacequota-getOrganizationQuota.adoc[leveloffset=+3] -include::modules/api-namespacequota-deleteOrganizationQuota.adoc[leveloffset=+3] include::modules/api-namespacequota-changeOrganizationQuota.adoc[leveloffset=+3] -include::modules/api-namespacequota-listOrganizationQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-deleteOrganizationQuota.adoc[leveloffset=+3] include::modules/api-namespacequota-createOrganizationQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-listOrganizationQuota.adoc[leveloffset=+3] include::modules/api-namespacequota-getUserQuota.adoc[leveloffset=+3] include::modules/api-organization.adoc[leveloffset=+2] @@ -93,61 +102,61 @@ include::modules/api-organization-createOrganization.adoc[leveloffset=+3] include::modules/api-organization-validateProxyCacheConfig.adoc[leveloffset=+3] include::modules/api-organization-getOrganizationCollaborators.adoc[leveloffset=+3] include::modules/api-organization-getOrganizationApplication.adoc[leveloffset=+3] -include::modules/api-organization-deleteOrganizationApplication.adoc[leveloffset=+3] include::modules/api-organization-updateOrganizationApplication.adoc[leveloffset=+3] -include::modules/api-organization-getOrganizationApplications.adoc[leveloffset=+3] +include::modules/api-organization-deleteOrganizationApplication.adoc[leveloffset=+3] include::modules/api-organization-createOrganizationApplication.adoc[leveloffset=+3] +include::modules/api-organization-getOrganizationApplications.adoc[leveloffset=+3] include::modules/api-organization-getProxyCacheConfig.adoc[leveloffset=+3] include::modules/api-organization-deleteProxyCacheConfig.adoc[leveloffset=+3] include::modules/api-organization-createProxyCacheConfig.adoc[leveloffset=+3] -include::modules/api-organization-removeOrganizationMember.adoc[leveloffset=+3] include::modules/api-organization-getOrganizationMember.adoc[leveloffset=+3] +include::modules/api-organization-removeOrganizationMember.adoc[leveloffset=+3] include::modules/api-organization-getOrganizationMembers.adoc[leveloffset=+3] include::modules/api-organization-getOrganization.adoc[leveloffset=+3] -include::modules/api-organization-deleteAdminedOrganization.adoc[leveloffset=+3] include::modules/api-organization-changeOrganizationDetails.adoc[leveloffset=+3] +include::modules/api-organization-deleteAdminedOrganization.adoc[leveloffset=+3] include::modules/api-organization-getApplicationInformation.adoc[leveloffset=+3] include::modules/api-permission.adoc[leveloffset=+2] include::modules/api-permission-getUserTransitivePermission.adoc[leveloffset=+3] include::modules/api-permission-getUserPermissions.adoc[leveloffset=+3] -include::modules/api-permission-deleteUserPermissions.adoc[leveloffset=+3] include::modules/api-permission-changeUserPermissions.adoc[leveloffset=+3] +include::modules/api-permission-deleteUserPermissions.adoc[leveloffset=+3] include::modules/api-permission-getTeamPermissions.adoc[leveloffset=+3] -include::modules/api-permission-deleteTeamPermissions.adoc[leveloffset=+3] include::modules/api-permission-changeTeamPermissions.adoc[leveloffset=+3] +include::modules/api-permission-deleteTeamPermissions.adoc[leveloffset=+3] 
include::modules/api-permission-listRepoTeamPermissions.adoc[leveloffset=+3] include::modules/api-permission-listRepoUserPermissions.adoc[leveloffset=+3] include::modules/api-prototype.adoc[leveloffset=+2] -include::modules/api-prototype-deleteOrganizationPrototypePermission.adoc[leveloffset=+3] include::modules/api-prototype-updateOrganizationPrototypePermission.adoc[leveloffset=+3] -include::modules/api-prototype-getOrganizationPrototypePermissions.adoc[leveloffset=+3] +include::modules/api-prototype-deleteOrganizationPrototypePermission.adoc[leveloffset=+3] include::modules/api-prototype-createOrganizationPrototypePermission.adoc[leveloffset=+3] +include::modules/api-prototype-getOrganizationPrototypePermissions.adoc[leveloffset=+3] include::modules/api-repository.adoc[leveloffset=+2] -include::modules/api-repository-listRepos.adoc[leveloffset=+3] include::modules/api-repository-createRepo.adoc[leveloffset=+3] +include::modules/api-repository-listRepos.adoc[leveloffset=+3] include::modules/api-repository-changeRepoVisibility.adoc[leveloffset=+3] include::modules/api-repository-changeRepoState.adoc[leveloffset=+3] include::modules/api-repository-getRepo.adoc[leveloffset=+3] -include::modules/api-repository-deleteRepository.adoc[leveloffset=+3] include::modules/api-repository-updateRepo.adoc[leveloffset=+3] +include::modules/api-repository-deleteRepository.adoc[leveloffset=+3] include::modules/api-repositorynotification.adoc[leveloffset=+2] include::modules/api-repositorynotification-testRepoNotification.adoc[leveloffset=+3] include::modules/api-repositorynotification-getRepoNotification.adoc[leveloffset=+3] include::modules/api-repositorynotification-deleteRepoNotification.adoc[leveloffset=+3] include::modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc[leveloffset=+3] -include::modules/api-repositorynotification-listRepoNotifications.adoc[leveloffset=+3] include::modules/api-repositorynotification-createRepoNotification.adoc[leveloffset=+3] +include::modules/api-repositorynotification-listRepoNotifications.adoc[leveloffset=+3] include::modules/api-repotoken.adoc[leveloffset=+2] include::modules/api-repotoken-getTokens.adoc[leveloffset=+3] -include::modules/api-repotoken-deleteToken.adoc[leveloffset=+3] include::modules/api-repotoken-changeToken.adoc[leveloffset=+3] -include::modules/api-repotoken-listRepoTokens.adoc[leveloffset=+3] +include::modules/api-repotoken-deleteToken.adoc[leveloffset=+3] include::modules/api-repotoken-createToken.adoc[leveloffset=+3] +include::modules/api-repotoken-listRepoTokens.adoc[leveloffset=+3] include::modules/api-robot.adoc[leveloffset=+2] include::modules/api-robot-getUserRobots.adoc[leveloffset=+3] @@ -156,12 +165,12 @@ include::modules/api-robot-regenerateOrgRobotToken.adoc[leveloffset=+3] include::modules/api-robot-getUserRobotPermissions.adoc[leveloffset=+3] include::modules/api-robot-regenerateUserRobotToken.adoc[leveloffset=+3] include::modules/api-robot-getOrgRobot.adoc[leveloffset=+3] -include::modules/api-robot-deleteOrgRobot.adoc[leveloffset=+3] include::modules/api-robot-createOrgRobot.adoc[leveloffset=+3] +include::modules/api-robot-deleteOrgRobot.adoc[leveloffset=+3] include::modules/api-robot-getOrgRobots.adoc[leveloffset=+3] include::modules/api-robot-getUserRobot.adoc[leveloffset=+3] -include::modules/api-robot-deleteUserRobot.adoc[leveloffset=+3] include::modules/api-robot-createUserRobot.adoc[leveloffset=+3] +include::modules/api-robot-deleteUserRobot.adoc[leveloffset=+3] 
include::modules/api-search.adoc[leveloffset=+2] include::modules/api-search-conductRepoSearch.adoc[leveloffset=+3] @@ -172,55 +181,66 @@ include::modules/api-secscan.adoc[leveloffset=+2] include::modules/api-secscan-getRepoManifestSecurity.adoc[leveloffset=+3] include::modules/api-superuser.adoc[leveloffset=+2] -include::modules/api-superuser-listAllUsers.adoc[leveloffset=+3] include::modules/api-superuser-createInstallUser.adoc[leveloffset=+3] +include::modules/api-superuser-listAllUsers.adoc[leveloffset=+3] include::modules/api-superuser-listAllLogs.adoc[leveloffset=+3] -include::modules/api-superuser-listServiceKeys.adoc[leveloffset=+3] include::modules/api-superuser-createServiceKey.adoc[leveloffset=+3] -include::modules/api-superuser-deleteUserQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-listServiceKeys.adoc[leveloffset=+3] include::modules/api-superuser-changeUserQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-deleteUserQuotaSuperUser.adoc[leveloffset=+3] include::modules/api-superuser-createUserQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-listUserQuotaSuperUser.adoc[leveloffset=+3] include::modules/api-superuser-changeOrganizationQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc[leveloffset=+3] include::modules/api-superuser-createOrganizationQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-deleteOrganization.adoc[leveloffset=+3] +include::modules/api-superuser-listOrganizationQuotaSuperUser.adoc[leveloffset=+3] include::modules/api-superuser-changeOrganization.adoc[leveloffset=+3] +include::modules/api-superuser-deleteOrganization.adoc[leveloffset=+3] include::modules/api-superuser-approveServiceKey.adoc[leveloffset=+3] include::modules/api-superuser-getServiceKey.adoc[leveloffset=+3] -include::modules/api-superuser-deleteServiceKey.adoc[leveloffset=+3] include::modules/api-superuser-updateServiceKey.adoc[leveloffset=+3] +include::modules/api-superuser-deleteServiceKey.adoc[leveloffset=+3] include::modules/api-superuser-getRepoBuildStatusSuperUser.adoc[leveloffset=+3] include::modules/api-superuser-getRepoBuildSuperUser.adoc[leveloffset=+3] include::modules/api-superuser-getRepoBuildLogsSuperUser.adoc[leveloffset=+3] include::modules/api-tag.adoc[leveloffset=+2] include::modules/api-tag-restoreTag.adoc[leveloffset=+3] -include::modules/api-tag-deleteFullTag.adoc[leveloffset=+3] include::modules/api-tag-changeTag.adoc[leveloffset=+3] +include::modules/api-tag-deleteFullTag.adoc[leveloffset=+3] include::modules/api-tag-listRepoTags.adoc[leveloffset=+3] include::modules/api-team.adoc[leveloffset=+2] include::modules/api-team-getOrganizationTeamPermissions.adoc[leveloffset=+3] -include::modules/api-team-deleteOrganizationTeamMember.adoc[leveloffset=+3] include::modules/api-team-updateOrganizationTeamMember.adoc[leveloffset=+3] +include::modules/api-team-deleteOrganizationTeamMember.adoc[leveloffset=+3] include::modules/api-team-getOrganizationTeamMembers.adoc[leveloffset=+3] -include::modules/api-team-deleteOrganizationTeam.adoc[leveloffset=+3] +include::modules/api-team-inviteTeamMemberEmail.adoc[leveloffset=+3] +include::modules/api-team-deleteTeamMemberEmailInvite.adoc[leveloffset=+3] include::modules/api-team-updateOrganizationTeam.adoc[leveloffset=+3] +include::modules/api-team-deleteOrganizationTeam.adoc[leveloffset=+3] 
include::modules/api-trigger.adoc[leveloffset=+2] include::modules/api-trigger-activateBuildTrigger.adoc[leveloffset=+3] include::modules/api-trigger-listTriggerRecentBuilds.adoc[leveloffset=+3] include::modules/api-trigger-manuallyStartBuildTrigger.adoc[leveloffset=+3] include::modules/api-trigger-getBuildTrigger.adoc[leveloffset=+3] -include::modules/api-trigger-deleteBuildTrigger.adoc[leveloffset=+3] include::modules/api-trigger-updateBuildTrigger.adoc[leveloffset=+3] +include::modules/api-trigger-deleteBuildTrigger.adoc[leveloffset=+3] include::modules/api-trigger-listBuildTriggers.adoc[leveloffset=+3] include::modules/api-user.adoc[leveloffset=+2] -include::modules/api-user-listStarredRepos.adoc[leveloffset=+3] include::modules/api-user-createStar.adoc[leveloffset=+3] +include::modules/api-user-listStarredRepos.adoc[leveloffset=+3] include::modules/api-user-getLoggedInUser.adoc[leveloffset=+3] include::modules/api-user-deleteStar.adoc[leveloffset=+3] include::modules/api-user-getUserInformation.adoc[leveloffset=+3] include::modules/api-definitions.adoc[leveloffset=+2] + +// do not remove +[id="api-config-examples"] +== API configuration examples + +include::modules/external-registry-config-api-example.adoc[leveloffset=+2] +include::modules/root-rule-config-api-example.adoc[leveloffset=+2] diff --git a/architecture/images b/architecture/images new file mode 120000 index 000000000..5e6757319 --- /dev/null +++ b/architecture/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/architecture/master.adoc b/architecture/master.adoc index a7459c0e4..f777281a2 100644 --- a/architecture/master.adoc +++ b/architecture/master.adoc @@ -1,44 +1,44 @@ include::modules/attributes.adoc[] -[id='quay-architecture'] -= {productname} Architecture - - +[id="quay-architecture"] += {productname} architecture include::modules/arch-intro.adoc[leveloffset=+1] +include::modules/arch-intro-scalability.adoc[leveloffset=+2] +include::modules/arch-intro-content-distribution.adoc[leveloffset=+2] +include::modules/arch-intro-build-automation.adoc[leveloffset=+2] +include::modules/arch-intro-integration.adoc[leveloffset=+2] +include::modules/arch-intro-security.adoc[leveloffset=+2] +include::modules/arch-intro-recent-features.adoc[leveloffset=+2] -include::modules/arch-core-intro.adoc[leveloffset=+1] -include::modules/core-infrastructure.adoc[leveloffset=+2] -include::modules/core-distinct-registries.adoc[leveloffset=+3] +include::modules/arch-prereqs.adoc[leveloffset=+1] +include::modules/core-prereqs-storage.adoc[leveloffset=+2] +include::modules/core-prereqs-db.adoc[leveloffset=+2] +include::modules/core-prereqs-redis.adoc[leveloffset=+2] -include::modules/core-prereqs.adoc[leveloffset=+2] -include::modules/core-prereqs-storage.adoc[leveloffset=+3] -include::modules/core-prereqs-db.adoc[leveloffset=+3] -include::modules/core-prereqs-redis.adoc[leveloffset=+3] +include::modules/core-infrastructure.adoc[leveloffset=+1] +include::modules/arch-mirror-registry.adoc[leveloffset=+2] +include::modules/core-distinct-registries.adoc[leveloffset=+2] - -include::modules/core-sample-quay-on-prem.adoc[leveloffset=+2] +include::modules/core-sample-quay-on-prem.adoc[leveloffset=+1] include::modules/core-example-deployment.adoc[leveloffset=+2] include::modules/deployment-topology.adoc[leveloffset=+2] include::modules/deployment-topology-with-storage-proxy.adoc[leveloffset=+2] -include::modules/public-cloud-intro.adoc[leveloffset=+2] -include::modules/public-cloud-aws.adoc[leveloffset=+3] 
-include::modules/public-cloud-azure.adoc[leveloffset=+3] - +include::modules/public-cloud-intro.adoc[leveloffset=+1] +include::modules/public-cloud-aws.adoc[leveloffset=+2] +include::modules/public-cloud-azure.adoc[leveloffset=+2] - - -include::modules/security-intro.adoc[leveloffset=+1] -include::modules/clair-intro.adoc[leveloffset=+2] -include::modules/clair-analyses.adoc[leveloffset=+3] -include::modules/clairv4-intro.adoc[leveloffset=+2] -include::modules/clairv4-arch.adoc[leveloffset=+3] -include::modules/clairv2-compare-v4.adoc[leveloffset=+3] -include::modules/clairv2-to-v4.adoc[leveloffset=+3] -include::modules/clairv4-limitations.adoc[leveloffset=+3] -include::modules/clairv4-air-gapped.adoc[leveloffset=+3] +//include::modules/security-intro.adoc[leveloffset=+1] +//include::modules/clair-intro.adoc[leveloffset=+2] +//include::modules/clair-analyses.adoc[leveloffset=+3] +//include::modules/clairv4-intro.adoc[leveloffset=+2] +//include::modules/clairv4-arch.adoc[leveloffset=+3] +//include::modules/clairv2-compare-v4.adoc[leveloffset=+3] +//include::modules/clairv2-to-v4.adoc[leveloffset=+3] +//include::modules/clairv4-limitations.adoc[leveloffset=+3] +//include::modules/clairv4-air-gapped.adoc[leveloffset=+3] include::modules/content-distrib-intro.adoc[leveloffset=+1] //mirroring @@ -56,32 +56,10 @@ include::modules/georepl-arch-operator.adoc[leveloffset=+3] include::modules/georepl-mixed-storage.adoc[leveloffset=+3] include::modules/mirroring-versus-georepl.adoc[leveloffset=+2] include::modules/airgap-intro.adoc[leveloffset=+2] -include::modules/airgap-clair.adoc[leveloffset=+3] -//access control -include::modules/access-control-intro.adoc[leveloffset=+1] -include::modules/tenancy-model.adoc[leveloffset=+2] -include::modules/repo-organizations-and-users-intro.adoc[leveloffset=+2] -include::modules/quay-users-intro.adoc[leveloffset=+3] -include::modules/quay-robot-accounts-intro.adoc[leveloffset=+3] -include::modules/quay-super-users-intro.adoc[leveloffset=+3] -include::modules/permissions-intro.adoc[leveloffset=+2] -include::modules/role-based-access-control-intro.adoc[leveloffset=+3] -include::modules/fine-grained-access-control-intro.adoc[leveloffset=+3] -include::modules/ldap-binding-groups-intro.adoc[leveloffset=+4] -include::modules/ldap-filtering-intro.adoc[leveloffset=+4] -include::modules/quay-sso-keycloak-intro.adoc[leveloffset=+4] +//include::modules/airgap-clair.adoc[leveloffset=+3] + //sizing include::modules/sizing-intro.adoc[leveloffset=+1] include::modules/sizing-sample.adoc[leveloffset=+2] include::modules/subscription-intro.adoc[leveloffset=+2] - include::modules/quay-internal-registry-intro.adoc[leveloffset=+2] - - - -include::modules/scalability-intro.adoc[leveloffset=+1] - - -include::modules/build-automation-intro.adoc[leveloffset=+1] - -include::modules/integration-intro.adoc[leveloffset=+1] diff --git a/build_docs b/build_docs index b52494827..ebed327a7 100755 --- a/build_docs +++ b/build_docs @@ -1,13 +1,20 @@ rm -rf dist asciidoctor -a productname="Project Quay" -d book welcome.adoc -D dist -o welcome.html + +asciidoctor -a productname="Project Quay" -a toc="left" -d book release_notes/master.adoc -D dist -o release_notes.html + asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay/master.adoc -D dist -o deploy_quay.html asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay_ha/master.adoc -D dist -o deploy_quay_ha.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book 
deploy_quay_on_openshift_op_tng/master.adoc -D dist -o deploy_quay_on_openshift_op_tng.html + +asciidoctor -a productname="Project Quay" -a toc="left" -d book config_quay/master.adoc -D dist -o config_quay.html asciidoctor -a productname="Project Quay" -a toc="left" -d book manage_quay/master.adoc -D dist -o manage_quay.html asciidoctor -a productname="Project Quay" -a toc="left" -d book upgrade_quay/master.adoc -D dist -o upgrade_quay.html + asciidoctor -a productname="Project Quay" -a toc="left" -d book use_quay/master.adoc -D dist -o use_quay.html -asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay_on_openshift_op_tng/master.adoc -D dist -o deploy_quay_on_openshift_op_tng.html -asciidoctor -a productname="Project Quay" -a toc="left" -d book release_notes/master.adoc -D dist -o release_notes.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book api/master.adoc -D dist -o api_quay.html + asciidoctor -a productname="Project Quay" -a toc="left" -d book build_quay/master.adoc -D dist -o build_quay.html cp -a images dist/images diff --git a/clair/docinfo.xml b/clair/docinfo.xml new file mode 100644 index 000000000..457153dd4 --- /dev/null +++ b/clair/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Vulnerability reporting with Clair on {productname} + + Get started with {productname} + + + Red Hat OpenShift Documentation Team + + diff --git a/clair/master.adoc b/clair/master.adoc new file mode 100644 index 000000000..d3751de3f --- /dev/null +++ b/clair/master.adoc @@ -0,0 +1,82 @@ +:_content-type: ASSEMBLY + +include::modules/attributes.adoc[] + +[id="vulnerability-reporting-clair-quay"] += Vulnerability reporting with Clair on {productname} + +This guide provides an overview of Clair for {productname}, explains how to run Clair on standalone {productname} and Operator deployments, and covers advanced Clair configuration. + +[id="vulnerability-reporting-clair-quay-overview"] += Vulnerability reporting with Clair on {productname} overview + +The content in this guide explains the key purposes and concepts of Clair on {productname}. It also contains information about Clair releases and the location of official Clair containers. + +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] +include::modules/clair-concepts.adoc[leveloffset=+1] +// include::modules/internal-api.adoc[leveloffset=+2] +include::modules/clair-authentication.adoc[leveloffset=+2] +//include::modules/testing-clair.adoc[leveloffset=+1] +include::modules/clair-updaters.adoc[leveloffset=+2] +include::modules/clair-updater-urls.adoc[leveloffset=+3] +include::modules/about-clair.adoc[leveloffset=+1] +include::modules/clair-cve.adoc[leveloffset=+2] +include::modules/fips-overview.adoc[leveloffset=+2] + +[id="testing-clair-with-quay"] += Clair on {productname} + +This guide contains procedures for running Clair on {productname} in both standalone and {ocp} Operator deployments. + +include::modules/clair-standalone-configure.adoc[leveloffset=+1] + +include::modules/clair-openshift.adoc[leveloffset=+1] +// include::modules/clair-openshift-manual.adoc[leveloffset=+2] + +include::modules/clair-testing.adoc[leveloffset=+1] + + +[id="advanced-clair-configuration"] += Advanced Clair configuration + +Use this section to configure advanced Clair features.
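The advanced configuration modules that follow begin with running Clair unmanaged on an Operator deployment. As a rough sketch of what that toggle looks like (the registry name, namespace, and trimmed component list are assumptions for illustration), the `clair` component is marked unmanaged in the `QuayRegistry` resource:

[source,terminal]
----
# Mark the clair component unmanaged so that the Operator no longer reconciles it
# (names and namespace are placeholders)
$ cat <<EOF | oc apply -n quay-enterprise -f -
apiVersion: quay.redhat.com/v1
kind: QuayRegistry
metadata:
  name: example-registry
spec:
  components:
    - kind: clair
      managed: false
EOF
----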
+ +include::modules/clair-unmanaged.adoc[leveloffset=+1] +include::modules/unmanaging-clair-database.adoc[leveloffset=+2] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+2] + +include::modules/custom-clair-configuration-managed-database.adoc[leveloffset=+1] +include::modules/managed-clair-database.adoc[leveloffset=+2] +include::modules/configuring-custom-clair-database-managed.adoc[leveloffset=+2] + +include::modules/clair-disconnected.adoc[leveloffset=+1] + + +include::modules/clair-clairctl.adoc[leveloffset=+2] +include::modules/clair-openshift-config.adoc[leveloffset=+3] +include::modules/clair-export-bundle.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-database.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+3] + + +include::modules/clair-clairctl-standalone.adoc[leveloffset=+2] +include::modules/clair-standalone-config-location.adoc[leveloffset=+3] +include::modules/clair-export-bundle-standalone.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+3] + +include::modules/clair-crda-configuration.adoc[leveloffset=+2] +include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+2] + + +include::modules/config-fields-overview.adoc[leveloffset=+1] +include::modules/config-fields-general-clair.adoc[leveloffset=+2] +include::modules/config-fields-clair-indexer.adoc[leveloffset=+2] +include::modules/config-fields-clair-matcher.adoc[leveloffset=+2] +include::modules/config-fields-clair-matchers.adoc[leveloffset=+2] +include::modules/config-fields-clair-updaters.adoc[leveloffset=+2] +include::modules/config-fields-clair-notifiers.adoc[leveloffset=+2] +include::modules/config-fields-clair-auth.adoc[leveloffset=+2] +include::modules/config-fields-clair-trace.adoc[leveloffset=+2] +include::modules/config-fields-clair-metrics.adoc[leveloffset=+2] \ No newline at end of file diff --git a/clair/modules b/clair/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/clair/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/config_quay/master.adoc b/config_quay/master.adoc index 44d53a001..b5c5c42e8 100644 --- a/config_quay/master.adoc +++ b/config_quay/master.adoc @@ -1,23 +1,23 @@ -include::modules/attributes.adoc[] +:_content-type: ASSEMBLY -[id='configure-quay'] -= Configure {productname} +include::modules/attributes.adoc[] +[id="configure-quay"] += Configure {productname} include::modules/config-intro.adoc[leveloffset=+1] +include::modules/config-updates-38.adoc[leveloffset=+2] +include::modules/config-updates-37.adoc[leveloffset=+2] include::modules/config-updates-36.adoc[leveloffset=+2] include::modules/config-file-intro.adoc[leveloffset=+2] include::modules/config-file-location.adoc[leveloffset=+2] include::modules/config-file-minimal.adoc[leveloffset=+2] - include::modules/config-fields-intro.adoc[leveloffset=+1] include::modules/config-fields-required-intro.adoc[leveloffset=+2] include::modules/config-preconfigure-automation-intro.adoc[leveloffset=+2] include::modules/config-fields-optional-intro.adoc[leveloffset=+2] - - include::modules/config-fields-required-general.adoc[leveloffset=+2] include::modules/config-fields-db.adoc[leveloffset=+2] include::modules/config-fields-storage.adoc[leveloffset=+2] @@ -32,11 +32,18 @@ include::modules/config-fields-storage-azure.adoc[leveloffset=+3] 
include::modules/config-fields-storage-swift.adoc[leveloffset=+3] include::modules/config-fields-redis.adoc[leveloffset=+2] +include::modules/config-fields-modelcache.adoc[leveloffset=+2] +include::modules/config-fields-modelcache-memcache.adoc[leveloffset=+3] +include::modules/config-fields-modelcache-single-redis.adoc[leveloffset=+3] +include::modules/config-fields-modelcache-clustered-redis.adoc[leveloffset=+3] include::modules/config-fields-tag-expiration.adoc[leveloffset=+2] include::modules/config-preconfigure-automation.adoc[leveloffset=+2] -include::modules/api-first-user.adoc[leveloffset=+3] +include::modules/deploying-the-operator-using-initial-configuration.adoc[leveloffset=+2] +include::modules/first-user-api.adoc[leveloffset=+3] +include::modules/using-the-oauth-token.adoc[leveloffset=+3] +include::modules/using-the-api-to-create-an-organization.adoc[leveloffset=+3] include::modules/config-fields-basic.adoc[leveloffset=+2] //include::modules/config-fields-server.adoc[leveloffset=+2] @@ -60,8 +67,9 @@ include::modules/config-fields-jwt.adoc[leveloffset=+2] include::modules/config-fields-app-tokens.adoc[leveloffset=+2] include::modules/config-fields-misc.adoc[leveloffset=+2] include::modules/config-fields-legacy.adoc[leveloffset=+2] - - +include::modules/config-fields-v2-ui.adoc[leveloffset=+2] +include::modules/config-fields-ipv6.adoc[leveloffset=+2] +include::modules/config-fields-branding.adoc[leveloffset=+2] include::modules/config-envvar-intro.adoc[leveloffset=+1] include::modules/config-envvar-georepl.adoc[leveloffset=+2] @@ -85,13 +93,47 @@ include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+2] include::modules/operator-helm-oci.adoc[leveloffset=+2] include::modules/operator-volume-size-overrides.adoc[leveloffset=+2] +// Clair + +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/config-fields-overview.adoc[leveloffset=+2] +include::modules/config-fields-general-clair.adoc[leveloffset=+3] +include::modules/config-fields-clair-indexer.adoc[leveloffset=+3] +include::modules/config-fields-clair-matcher.adoc[leveloffset=+3] +include::modules/config-fields-clair-matchers.adoc[leveloffset=+3] +include::modules/config-fields-clair-updaters.adoc[leveloffset=+3] +include::modules/config-fields-clair-notifiers.adoc[leveloffset=+3] +include::modules/config-fields-clair-auth.adoc[leveloffset=+3] +include::modules/config-fields-clair-trace.adoc[leveloffset=+3] +include::modules/config-fields-clair-metrics.adoc[leveloffset=+3] + +//// +include::modules/clair-advanced-configuration-overview.adoc[leveloffset=+2] +include::modules/clair-unmanaged.adoc[leveloffset=+3] +include::modules/unmanaging-clair-database.adoc[leveloffset=+4] +include::modules/clair-crda-configuration.adoc[leveloffset=+3] +include::modules/clair-disconnected.adoc[leveloffset=+3] +include::modules/configuring-clair-disconnected-environment.adoc[leveloffset=+4] +include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+4] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+3] +include::modules/clair-add-info.adoc[leveloffset=+3] +//// + +include::modules/proc_container-security-operator-setup.adoc[leveloffset=+1] + +ifeval::["{productname}" == "Project Quay"] +include::modules/proc_manage-security-scanning.adoc[leveloffset=+1] +include::modules/proc_manage-clair-enable.adoc[leveloffset=+1] +endif::[] + +//// + include::modules/config-api-intro.adoc[leveloffset=+1] include::modules/config-api-default.adoc[leveloffset=+2] 
include::modules/config-api-retrieve.adoc[leveloffset=+2] include::modules/config-api-validate.adoc[leveloffset=+2] include::modules/config-api-required.adoc[leveloffset=+2] - // TODO 36 Redo config tool images include::modules/config-ui-intro.adoc[leveloffset=+1] include::modules/config-ui-custom-ssl-certs.adoc[leveloffset=+2] @@ -117,4 +159,4 @@ include::modules/config-ui-internal-authentication.adoc[leveloffset=+2] include::modules/config-ui-oauth.adoc[leveloffset=+2] include::modules/config-ui-access-settings.adoc[leveloffset=+2] include::modules/config-ui-dockerfile-build.adoc[leveloffset=+2] -//include::modules/ssl-config-ui.adoc[leveloffset=+2] +//include::modules/ssl-config-ui.adoc[leveloffset=+2] \ No newline at end of file diff --git a/deploy_quay/master.adoc b/deploy_quay/master.adoc index 6137c4954..60bf150e7 100644 --- a/deploy_quay/master.adoc +++ b/deploy_quay/master.adoc @@ -1,16 +1,17 @@ +:_content-type: ASSEMBLY include::modules/attributes.adoc[] -[id='deploy-quay-single'] +[id="deploy-quay-single"] = Deploy {productname} for proof-of-concept (non-production) purposes {productname} is an enterprise-quality registry for building, securing and serving container images. This procedure describes how to deploy {productname} for proof-of-concept (non-production) purposes. include::modules/con_quay_intro.adoc[leveloffset=+1] - +[id="poc-getting-started"] == Getting started with {productname} -The {productname} registry can be deployed for non-production purposes on a single machine, either physical or virtual, with the following specifications. +The {productname} registry can be deployed for non-production purposes on a single machine, either physical or virtual. include::modules/con_quay_single_prereq.adoc[leveloffset=+2] @@ -32,10 +33,10 @@ include::modules/proc_deploy_quay_poc_use.adoc[leveloffset=+2] == Advanced {productname} deployment +Use the following sections to configure advanced {productname} settings. 
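The SSL/TLS modules included next cover certificate creation and configuration. As a hedged sketch of the kind of self-signed pair the `ssl-create-certs` module produces (the file names and the `quay-server.example.com` subject are assumptions), `openssl` can generate a root CA and a signed server certificate:

[source,terminal]
----
# Create a root CA key and certificate (placeholder file names)
$ openssl genrsa -out rootCA.key 2048
$ openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 365 -out rootCA.pem

# Create a server key and a certificate signed by the root CA
$ openssl genrsa -out ssl.key 2048
$ openssl req -new -key ssl.key -out ssl.csr -subj "/CN=quay-server.example.com"
$ openssl x509 -req -in ssl.csr -CA rootCA.pem -CAkey rootCA.key \
  -CAcreateserial -out ssl.cert -days 365
----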
-=== Using SSL to protect connections to {productname} //include::modules/proc_manage-quay-ssl.adoc[leveloffset=+1] -include::modules/ssl-intro.adoc[leveloffset=+3] +include::modules/ssl-intro.adoc[leveloffset=+2] include::modules/ssl-create-certs.adoc[leveloffset=+3] include::modules/ssl-config-ui.adoc[leveloffset=+3] include::modules/ssl-config-cli.adoc[leveloffset=+3] @@ -58,14 +59,15 @@ include::modules/mirroring-creating-repo.adoc[leveloffset=+3] include::modules/mirroring-tag-patterns.adoc[leveloffset=+3] -include::modules/clair-standalone-intro.adoc[leveloffset=+2] -include::modules/clair-standalone-database.adoc[leveloffset=+3] -include::modules/clair-standalone-quay-config.adoc[leveloffset=+3] -include::modules/clair-standalone-config.adoc[leveloffset=+3] -include::modules/clair-standalone-running.adoc[leveloffset=+3] -include::modules/clair-standalone-using.adoc[leveloffset=+3] +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+2] +include::modules/clair-standalone-configure.adoc[leveloffset=+3] +include::modules/clair-testing.adoc[leveloffset=+3] include::modules/clair-cve.adoc[leveloffset=+3] +.Additional resources + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#vulnerability-reporting-clair-quay-overview[Vulnerability reporting with Clair on {productname}] + include::modules/proc_deploy_quay_poc_restart.adoc[leveloffset=+2] diff --git a/deploy_quay_on_openshift_op_tng/master.adoc b/deploy_quay_on_openshift_op_tng/master.adoc index 978746fd6..11a3efe5a 100644 --- a/deploy_quay_on_openshift_op_tng/master.adoc +++ b/deploy_quay_on_openshift_op_tng/master.adoc @@ -1,7 +1,7 @@ include::modules/attributes.adoc[] -[id='deploy-quay-on-openshift-op-tng'] -= Deploy {productname} on OpenShift with the Quay Operator +[id="deploy-quay-on-openshift-op-tng"] += Deploy {productname} on {ocp} with the {productname} Operator {productname} is an enterprise-quality container registry. Use {productname} to build and store container images, then make them available to deploy across your enterprise. @@ -31,16 +31,13 @@ include::modules/config-preconfigure-automation.adoc[leveloffset=+2] include::modules/operator-preconfig-storage.adoc[leveloffset=+2] include::modules/operator-unmanaged-storage.adoc[leveloffset=+3] -include::modules/config-fields-storage-aws.adoc[leveloffset=+4] -include::modules/config-fields-storage-gcp.adoc[leveloffset=+4] -include::modules/config-fields-storage-azure.adoc[leveloffset=+4] -include::modules/operator-unmanaged-storage-noobaa.adoc[leveloffset=+4] include::modules/operator-managed-storage.adoc[leveloffset=3] ifeval::["{productname}" == "Red Hat Quay"] include::modules/operator-standalone-object-gateway.adoc[leveloffset=4] endif::[] //Database +[id="configuring-the-database-poc"] === Configuring the database include::modules/operator-unmanaged-postgres.adoc[leveloffset=+3] include::modules/config-fields-db.adoc[leveloffset=+3] @@ -49,19 +46,23 @@ include::modules/operator-preconfig-tls-routes.adoc[leveloffset=+2] //* The Operator will deploy an OpenShift `Route` as the default entrypoint to the registry. If you prefer a different entrypoint (e.g. `Ingress` or direct `Service` access that configuration will need to be done manually). 
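The preconfiguration modules above culminate in a config bundle that the Operator consumes as a `Secret`. A minimal sketch, assuming a `quay-enterprise` namespace, a local `config.yaml`, and a secret named `init-config-bundle-secret` (all placeholders):

[source,terminal]
----
# Package a config.yaml into a secret that the QuayRegistry can reference
$ oc create secret generic init-config-bundle-secret \
  --from-file=config.yaml=./config.yaml -n quay-enterprise

# The secret is then referenced from the QuayRegistry spec, for example:
# spec.configBundleSecret: init-config-bundle-secret
----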
include::modules/operator-components-unmanaged-other.adoc[leveloffset=+2] include::modules/operator-unmanaged-redis.adoc[leveloffset=+3] -include::modules/config-fields-redis.adoc[leveloffset=+4] + +[role="_additional-resources"] +.Additional resources + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/configure_red_hat_quay/index#config-fields-redis[Redis configuration fields] + include::modules/operator-unmanaged-hpa.adoc[leveloffset=+3] include::modules/operator-unmanaged-route.adoc[leveloffset=+3] include::modules/operator-unmanaged-monitoring.adoc[leveloffset=+3] include::modules/operator-unmanaged-mirroring.adoc[leveloffset=+3] - include::modules/operator-deploy.adoc[leveloffset=+1] include::modules/operator-deploy-cli.adoc[leveloffset=+2] include::modules/operator-deploy-view-pods-cli.adoc[leveloffset=+3] include::modules/operator-deploy-hpa.adoc[leveloffset=+3] -// include::modules/api-first-user.adoc[leveloffset=+3] +include::modules/first-user-api.adoc[leveloffset=+3] include::modules/operator-monitor-deploy-cli.adoc[leveloffset=+3] include::modules/operator-deploy-ui.adoc[leveloffset=+2] @@ -95,16 +96,10 @@ include::modules/operator-external-access.adoc[leveloffset=+2] include::modules/operator-console-monitoring-alerting.adoc[leveloffset=+2] - -include::modules/clair-openshift-airgap-update.adoc[leveloffset=+2] -include::modules/clair-clairctl.adoc[leveloffset=+3] -==== Retrieving the Clair config -include::modules/clair-openshift-config.adoc[leveloffset=+4] -include::modules/clair-standalone-config-location.adoc[leveloffset=+4] -include::modules/clair-export-bundle.adoc[leveloffset=+3] -include::modules/clair-openshift-airgap-database.adoc[leveloffset=+3] -include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+3] - +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+2] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+3] +include::modules/clair-openshift.adoc[leveloffset=+3] +include::modules/clair-testing.adoc[leveloffset=+3] include::modules/fips-overview.adoc[leveloffset=+2] @@ -116,6 +111,39 @@ include::modules/operator-resize-storage.adoc[leveloffset=+2] include::modules/operator-customize-images.adoc[leveloffset=+2] include::modules/operator-cloudfront.adoc[leveloffset=+2] +include::modules/clair-advanced-configuration-overview.adoc[leveloffset=+2] + +include::modules/clair-unmanaged.adoc[leveloffset=+3] +include::modules/unmanaging-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+4] + + +include::modules/custom-clair-configuration-managed-database.adoc[leveloffset=+3] +include::modules/managed-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database-managed.adoc[leveloffset=+4] + +include::modules/clair-disconnected.adoc[leveloffset=+3] +include::modules/clair-clairctl.adoc[leveloffset=+4] +include::modules/clair-openshift-config.adoc[leveloffset=+5] +include::modules/clair-export-bundle.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+5] + +include::modules/clair-clairctl-standalone.adoc[leveloffset=+4] +include::modules/clair-standalone-config-location.adoc[leveloffset=+5] +include::modules/clair-export-bundle-standalone.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+5]
+include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+5] + +include::modules/clair-crda-configuration.adoc[leveloffset=+3] +include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+3] + +//// +include::modules/clair-unmanaged.adoc[leveloffset=+3] +include::modules/unmanaging-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+4] +//// + include::modules/build-enhancements.adoc[leveloffset=+1] include::modules/build-enhanced-arch.adoc[leveloffset=+2] include::modules/build-limitations.adoc[leveloffset=+2] @@ -127,6 +155,12 @@ include::modules/georepl-arch-operator.adoc[leveloffset=+2] include::modules/georepl-deploy-operator.adoc[leveloffset=+3] include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] +include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay.adoc[leveloffset=+2] + +include::modules/operator-ipv6-dual-stack.adoc[leveloffset=+1] + include::modules/operator-upgrade.adoc[leveloffset=+1] diff --git a/early_access/master.adoc b/early_access/master.adoc index 0a154438c..4dcd5e675 100644 --- a/early_access/master.adoc +++ b/early_access/master.adoc @@ -34,7 +34,7 @@ include::modules/quota-management-arch.adoc[leveloffset=+3] include::modules/quota-management-limitations.adoc[leveloffset=+3] include::modules/config-fields-quota.adoc[leveloffset=+3] -//include::modules/quota-establishment-ui.adoc[leveloffset=+3] +include::modules/quota-establishment-ui.adoc[leveloffset=+3] include::modules/quota-establishment-api.adoc[leveloffset=+3] include::modules/api-namespacequota.adoc[leveloffset=+2] diff --git a/images/178_Quay_architecture_0821_georeplication_openshift-temp.png b/images/178_Quay_architecture_0821_georeplication_openshift-temp.png new file mode 100644 index 000000000..b646f1fd3 Binary files /dev/null and b/images/178_Quay_architecture_0821_georeplication_openshift-temp.png differ diff --git a/images/38-ui-toggle.png b/images/38-ui-toggle.png new file mode 100644 index 000000000..d60184aed Binary files /dev/null and b/images/38-ui-toggle.png differ diff --git a/images/garbage-collection-metrics.png b/images/garbage-collection-metrics.png new file mode 100644 index 000000000..b10de0d82 Binary files /dev/null and b/images/garbage-collection-metrics.png differ diff --git a/images/operator-config-bundle-edit-secret.png b/images/operator-config-bundle-edit-secret.png new file mode 100644 index 000000000..1eb81899f Binary files /dev/null and b/images/operator-config-bundle-edit-secret.png differ diff --git a/images/operator-quay-registry-overview.png b/images/operator-quay-registry-overview.png new file mode 100644 index 000000000..21b188f6d Binary files /dev/null and b/images/operator-quay-registry-overview.png differ diff --git a/images/operator-save-config-changes.png b/images/operator-save-config-changes.png new file mode 100644 index 000000000..96efbf3e0 Binary files /dev/null and b/images/operator-save-config-changes.png differ diff --git a/images/quota-none-org-settings.png b/images/quota-none-org-settings.png new file mode 100644 index 000000000..6d01c3dae Binary files /dev/null and b/images/quota-none-org-settings.png differ diff --git a/images/quota-notifications.png b/images/quota-notifications.png index ec5595bf5..900e983c9 100644 Binary files a/images/quota-notifications.png and b/images/quota-notifications.png differ diff --git 
a/images/quota-org-consumed-first.png b/images/quota-org-consumed-first.png new file mode 100644 index 000000000..3c70c1cfc Binary files /dev/null and b/images/quota-org-consumed-first.png differ diff --git a/images/quota-org-consumed-second.png b/images/quota-org-consumed-second.png new file mode 100644 index 000000000..8e75e9a40 Binary files /dev/null and b/images/quota-org-consumed-second.png differ diff --git a/images/quota-org-init-consumed.png b/images/quota-org-init-consumed.png new file mode 100644 index 000000000..dd38584d4 Binary files /dev/null and b/images/quota-org-init-consumed.png differ diff --git a/images/quota-org-quota-policy.png b/images/quota-org-quota-policy.png new file mode 100644 index 000000000..2bcff31d0 Binary files /dev/null and b/images/quota-org-quota-policy.png differ diff --git a/images/quota-second-image.png b/images/quota-second-image.png deleted file mode 100644 index 2013c5a49..000000000 Binary files a/images/quota-second-image.png and /dev/null differ diff --git a/images/quota-su-consumed-first.png b/images/quota-su-consumed-first.png new file mode 100644 index 000000000..e34c0afbd Binary files /dev/null and b/images/quota-su-consumed-first.png differ diff --git a/images/quota-su-increase-100MB.png b/images/quota-su-increase-100MB.png new file mode 100644 index 000000000..91258705d Binary files /dev/null and b/images/quota-su-increase-100MB.png differ diff --git a/images/quota-su-init-10MB.png b/images/quota-su-init-10MB.png new file mode 100644 index 000000000..cc6272d97 Binary files /dev/null and b/images/quota-su-init-10MB.png differ diff --git a/images/quota-su-init-consumed.png b/images/quota-su-init-consumed.png new file mode 100644 index 000000000..0daf93653 Binary files /dev/null and b/images/quota-su-init-consumed.png differ diff --git a/images/quota-su-org-options.png b/images/quota-su-org-options.png new file mode 100644 index 000000000..caa2510c1 Binary files /dev/null and b/images/quota-su-org-options.png differ diff --git a/images/quota-su-reject-80.png b/images/quota-su-reject-80.png new file mode 100644 index 000000000..22f7fe4e8 Binary files /dev/null and b/images/quota-su-reject-80.png differ diff --git a/images/quota-su-warning-70.png b/images/quota-su-warning-70.png new file mode 100644 index 000000000..74e7535ba Binary files /dev/null and b/images/quota-su-warning-70.png differ diff --git a/images/repo-mirror-details-start.png b/images/repo-mirror-details-start.png index 9d872d941..409e63571 100644 Binary files a/images/repo-mirror-details-start.png and b/images/repo-mirror-details-start.png differ diff --git a/images/toggle-legacy-ui.png b/images/toggle-legacy-ui.png new file mode 100644 index 000000000..9bfa2a932 Binary files /dev/null and b/images/toggle-legacy-ui.png differ diff --git a/manage_quay/master.adoc b/manage_quay/master.adoc index dd09820db..90602476d 100644 --- a/manage_quay/master.adoc +++ b/manage_quay/master.adoc @@ -1,8 +1,7 @@ +:_content-type: ASSEMBLY include::modules/attributes.adoc[] -:context: manage_quay - -[id='manage-quay'] +[id="manage-quay"] = Manage {productname} Once you have deployed a {productname} registry, there are many ways you can @@ -10,17 +9,19 @@ further configure and manage that deployment. 
Topics covered here include: * Advanced {productname} configuration * Setting notifications to alert you of a new {productname} release -* Securing connections with SSL and TLS certificates +* Securing connections with SSL/TLS certificates * Directing action logs storage to Elasticsearch * Configuring image security scanning with Clair * Scanning pod images with the Container Security Operator -* Integrate {productname} into OpenShift with the Quay Bridge Operator +* Integrating {productname} into {ocp} with the Quay Bridge Operator * Mirroring images with repository mirroring -* Sharing Quay images with a BitTorrent service +* Sharing {productname} images with a BitTorrent service * Authenticating users with LDAP * Enabling {productname} for Prometheus and Grafana metrics * Setting up geo-replication -* Troubleshooting Quay +* Troubleshooting {productname} + +For a complete list of {productname} configuration fields, see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index[Configure {productname}] page. include::modules/proc_manage-advanced-config.adoc[leveloffset=+1] @@ -46,40 +47,30 @@ include::modules/ssl-testing-ui.adoc[leveloffset=+2] include::modules/ssl-trust-ca-podman.adoc[leveloffset=+2] include::modules/ssl-trust-ca-system.adoc[leveloffset=+2] - include::modules/config-custom-ssl-certs-manual.adoc[leveloffset=+1] include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+2] - - - include::modules/proc_manage-log-storage.adoc[leveloffset=+1] +include::modules/proc_manage-log-storage-elasticsearch.adoc[leveloffset=+2] +include::modules/proc_manage-log-storage-splunk.adoc[leveloffset=+2] :context: security-scanning - -include::modules/clair-intro2.adoc[leveloffset=+1] +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] +include::modules/clair-standalone-configure.adoc[leveloffset=+2] include::modules/clair-openshift.adoc[leveloffset=+2] -include::modules/clair-openshift-manual.adoc[leveloffset=+3] -include::modules/clair-standalone.adoc[leveloffset=+2] -include::modules/clair-unmanaged.adoc[leveloffset=+2] -include::modules/clair-using.adoc[leveloffset=+2] -include::modules/clair-cve.adoc[leveloffset=+2] -include::modules/clair-disconnected.adoc[leveloffset=+2] -include::modules/clair-updater-urls.adoc[leveloffset=+2] -include::modules/clair-add-info.adoc[leveloffset=+2] - - -include::modules/proc_container-security-operator-setup.adoc[leveloffset=+1] - -ifeval::["{productname}" == "Project Quay"] -include::modules/proc_manage-security-scanning.adoc[leveloffset=+1] -include::modules/proc_manage-clair-enable.adoc[leveloffset=+1] -endif::[] +include::modules/clair-testing.adoc[leveloffset=+2] :context: manage_quay -include::modules/proc_quay-bridge-operator.adoc[leveloffset=+1] +include::modules/conc_quay-bridge-operator.adoc[leveloffset=+1] +include::modules/proc_setting-up-quay-for-qbo.adoc[leveloffset=+2] +include::modules/proc_installing-qbo-on-ocp.adoc[leveloffset=+2] +include::modules/proc_creating-ocp-secret-for-oauth-token.adoc[leveloffset=+2] +include::modules/proc_creating-quay-integration-cr.adoc[leveloffset=+2] +include::modules/ref_quay-integration-config-fields.adoc[leveloffset=+2] + [[repo-mirroring-in-red-hat-quay]] == Repository mirroring @@ -97,6 +88,9 @@ include::modules/mirroring-tag-patterns.adoc[leveloffset=+2] include::modules/mirroring-working-with.adoc[leveloffset=+2]
include::modules/mirroring-recommend.adoc[leveloffset=+2] +:context: manage_quay + +include::modules/proc_manage-ipv6-dual-stack.adoc[leveloffset=+1] :context: manage_quay @@ -114,11 +108,11 @@ include::modules/metrics-authentication.adoc[leveloffset=+3] //include::modules/proc_manage-quay-geo-replication.adoc[leveloffset=+1] include::modules/quota-management-and-enforcement.adoc[leveloffset=+1] +include::modules/config-fields-quota.adoc[leveloffset=+2] include::modules/quota-management-arch.adoc[leveloffset=+2] +include::modules/quota-establishment-ui.adoc[leveloffset=+2] +include::modules/quota-establishment-api.adoc[leveloffset=+2] include::modules/quota-management-limitations.adoc[leveloffset=+2] -include::modules/config-fields-quota.adoc[leveloffset=+3] -//include::modules/quota-establishment-ui.adoc[leveloffset=+3] -include::modules/quota-establishment-api.adoc[leveloffset=+3] include::modules/georepl-intro.adoc[leveloffset=+1] @@ -130,9 +124,26 @@ include::modules/georepl-arch-operator.adoc[leveloffset=+2] include::modules/georepl-deploy-operator.adoc[leveloffset=+3] include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] +include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay.adoc[leveloffset=+2] + + +include::modules/standalone-to-operator-backup-restore.adoc[leveloffset=+1] + +include::modules/standalone-deployment-backup-restore.adoc[leveloffset=+1] +include::modules/backing-up-red-hat-quay-standalone.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay-standalone.adoc[leveloffset=+2] + +include::modules/garbage-collection.adoc[leveloffset=+1] + +//branding +include::modules/branding-quay-deployment.adoc[leveloffset=+1] + include::modules/proc_manage-quay-troubleshooting.adoc[leveloffset=+1] include::modules/con_schema.adoc[leveloffset=+1] + [discrete] == Additional resources diff --git a/modules/about-clair.adoc b/modules/about-clair.adoc new file mode 100644 index 000000000..07666ae6d --- /dev/null +++ b/modules/about-clair.adoc @@ -0,0 +1,130 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="about-clair"] += About Clair + +The content in this section highlights Clair releases, official Clair containers, and information about CVSS enrichment data. + +[id="clair-releases"] +== Clair releases + +New versions of Clair are regularly released. The source code needed to build Clair is packaged as an archive and attached to each release. Clair releases can be found at link:https://github.com/quay/clair/releases[Clair releases]. + + +Release artifacts also include the `clairctl` command-line interface tool, which obtains updater data from the internet by using an open host. + +[id="clair-supported-languages"] +== Clair supported languages + +Clair supports the following languages: + +* Python +* Java (CRDA must be enabled) + +[id="clair-containers"] +== Clair containers + +Official downstream Clair containers bundled with {productname} can be found on the link:https://registry.redhat.io[Red Hat Ecosystem Catalog]. + +Official upstream containers are packaged and released as a container at link:https://quay.io/projectquay/clair[quay.io/projectquay/clair]. The `latest` tag tracks the Git development branch. Version tags are built from the corresponding release. + +//// + +[id="notifier-pagination"] +===== Notifier pagination + +The URL returned in the callback field takes the client to a paginated result.
+ +The following example shows the callback endpoint specification: +[source,json] +---- +GET /notifier/api/v1/notification/{id}?[page_size=N][next=N] +{ + page: { + size: int, + next: string, // if present, the next id to fetch. + } + notifications: [ Notification… ] // array of notifications; max len == page.size +} +---- +.small +-- +* The `GET` callback request implements a simple paging mechanism. +* A `page` object accompanying the notification list specifies `next` and `size` fields. +* The `next` field returned in the page must be provided as the subsequent request's `next` URL parameter to retrieve the next set of notifications. +* The `size` field echoes back the request's `page_size` parameter. + + + +* The `page_size` URL parameter controls how many notifications are returned in a single page. If unprovided, a default of `500` is used. +* The `next` URL parameter informs Clair of the next set of paginated notifications to return. If not provided, the `0th` page is assumed. + +//// + +//// + +.Prerequisites + +* The Linux `make` command is required to start the local development environment. + +* Podman v3.0 or greater. Alternatively, you can use Docker or Docker Compose; however, not all versions of Docker or Docker Compose have been tested. As a result, some versions might not work properly. ++ +This guide uses Podman with an implementation of the Compose Specification. + +* Go v1.16 or greater. + +.Procedure + +. Enter the following command to clone the Clair GitHub repository: ++ +[source,terminal] +---- +$ git clone git@github.com:quay/clair.git +---- + +. Change into the Clair directory by entering the following command: ++ +[source,terminal] +---- +$ cd clair +---- + +. Start the Clair container by entering the following command: ++ +[source,terminal] +---- +$ podman-compose up -d +---- + +After the local development environment starts, the following infrastructure is available to you: + +* `localhost:8080`. This includes dashboards and debugging services. You can see Traefik configuration logs in `local-dev/traefik`, where various services are served. + +* `localhost:6060`. This includes Clair services. + +* {productname}. If started, {productname} runs in a single-node, local storage configuration. A random port will be forwarded from `localhost`. Use `podman port` to view mapping information. + +* PostgreSQL. PostgreSQL has a random port forwarded from `localhost` to the database server. See `local-dev/clair/init.sql` for credentials and permissions. Use `podman port` to view mapping information. + +[id="testing-clair"] +== Testing Clair on the local development environment + +After starting the Clair container, a {productname} server is forwarded to a random port on the host. + +. Locate and open the port hosting {productname}. + +. Click *Create Account* and create a new user, for example, `admin`. + +. Set a password. + +. To push to the {productname} container, you must exec into the skopeo container.
For example: ++ +[source,terminal] +---- +$ podman exec -it quay-skopeo /usr/bin/skopeo copy --dst-creds ':' --dst-tls-verify=false clair-quay:8080//: +---- + +//// \ No newline at end of file diff --git a/modules/access-control-intro.adoc b/modules/access-control-intro.adoc index 8e5fe2001..d5121d368 100644 --- a/modules/access-control-intro.adoc +++ b/modules/access-control-intro.adoc @@ -1,6 +1,6 @@ [[access-control-intro]] -= Access control += Access control in {productname} -{productname} provides both Role Based Access Control (RBAC) and Fine-Grained Access Control, and has team features that allow for limited access control of repositories, organizations, and user privileges. {productname} access control features also provide support for dispersed organizations. +{productname} provides both role-based access control (RBAC) and fine-grained access control, and has team features that allow for limited access control of repositories, organizations, and user privileges. {productname} access control features also provide support for dispersed organizations. diff --git a/modules/airgap-clair.adoc b/modules/airgap-clair.adoc index 64651dab8..d4053b52c 100644 --- a/modules/airgap-clair.adoc +++ b/modules/airgap-clair.adoc @@ -1,11 +1,12 @@ -[[airgap-clair]] +:_content-type: CONCEPT +[id="airgap-clair"] = Using Clair in air-gapped environments -By default, Clair will attempt to run automated updates against Red Hat servers. To run Clair in network environments that are disconnected from the internet: - -* Disable Clair auto-update in the Clair configuration bundle -* Manually update the vulnerability database on a system with internet access and then export to disk -* Transfer the on-disk data to the target system using offline media and then manually import it into Clair +By default, Clair tries to run automated updates against Red Hat servers. To run Clair in network environments that are disconnected from the internet, you must complete the following actions: + +* Disable Clair auto-update in the Clair configuration bundle. +* Manually update the vulnerability database on a system with internet access and then export it to disk. +* Transfer the on-disk data to the target system using offline media, and then manually import it into Clair. Using Clair in air-gapped environments is fully containerized and, as a result, is easy to automate. \ No newline at end of file diff --git a/modules/airgap-intro.adoc b/modules/airgap-intro.adoc index caaaaffe7..f2388e648 100644 --- a/modules/airgap-intro.adoc +++ b/modules/airgap-intro.adoc @@ -1,13 +1,16 @@ -[[airgap-intro]] -= Air-gapped / disconnected deployments +:_content-type: CONCEPT +[id="arch-airgap-intro"] += Air-gapped or disconnected deployments + +In the following diagram, the upper deployment shows {productname} and Clair connected to the internet, with an air-gapped {ocp} cluster accessing the {productname} registry through an explicit, allowlisted hole in the firewall. + +The lower deployment shows {productname} and Clair running inside of the firewall, with image and CVE data transferred to the target system using offline media. The data is exported from a separate {productname} and Clair deployment that is connected to the internet.
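For illustration, a minimal sketch of that export/import workflow using the `clairctl` tool that ships with Clair release artifacts might look like the following; the config file path and archive name are assumptions, not fixed values:

[source,terminal]
----
# On the system with internet access, export the updater data to disk.
$ clairctl --config ./config.yaml export-updaters updates.gz

# After transferring updates.gz to the disconnected system on offline media,
# import it into the disconnected Clair instance.
$ clairctl --config ./config.yaml import-updaters updates.gz
----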
The following diagram shows how {productname} and Clair can be deployed in air-gapped or disconnected environments: +.{productname} and Clair in disconnected, or air-gapped, environments image:178_Quay_architecture_0821_air-gapped.png[Air-gapped deployment] -The upper deployment in the diagram shows {productname} and Clair connected to the internet, with an air-gapped OpenShift cluster accessing the Quay registry through an explicit, white-listed hole in the firewall - -The lower deployment in the diagram shows {productname} and Clair running inside the firewall, with image and CVE data transferred to the target system using offline media. The data is exported from a separate Quay and Clair deployment that is connected to the internet. diff --git a/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc b/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc index 2cf7974ca..2fdc718ea 100644 --- a/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc +++ b/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc @@ -7,7 +7,9 @@ -**Authorizations: ** +**Authorizations: **oauth2_implicit (**super:user**) + + [discrete] == Path parameters diff --git a/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc b/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc index 3e11f2592..afdaf6f93 100644 --- a/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc +++ b/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc @@ -7,7 +7,9 @@ -**Authorizations: ** +**Authorizations: **oauth2_implicit (**super:user**) + + [discrete] == Path parameters diff --git a/modules/api-namespacequota-getUserQuotaLimit.adoc b/modules/api-namespacequota-getUserQuotaLimit.adoc index 60a474191..97c8e7b73 100644 --- a/modules/api-namespacequota-getUserQuotaLimit.adoc +++ b/modules/api-namespacequota-getUserQuotaLimit.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**limit_id** + -_required_||string |path|**quota_id** + _required_||string +|path|**limit_id** + +_required_||string |=== diff --git a/modules/api-organization-createOrganizationApplication.adoc b/modules/api-organization-createOrganizationApplication.adoc index 81aee480d..fa4a4d720 100644 --- a/modules/api-organization-createOrganizationApplication.adoc +++ b/modules/api-organization-createOrganizationApplication.adoc @@ -16,7 +16,7 @@ Creates a new application under this organization. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + +|path|**orgname** + _required_|The name of the organization|string |=== @@ -29,15 +29,15 @@ Description of a new organization application. 
[options="header", width=100%, cols=".^3a,.^9a,.^4a"] |=== |Name|Description|Schema -|**name** + -_optional_|The name of the application|string -|**redirect_uri** + -_optional_|The URI for the application's OAuth redirect|string -|**application_uri** + -_optional_|The URI for the application's homepage|string -|**description** + +|**name** + +_required_|The name of the application|string +|**redirect_uri** + +_required_|The URI for the application's OAuth redirect|string +|**application_uri** + +_required_|The URI for the application's homepage|string +|**description** + _optional_|The human-readable description for the application|string -|**avatar_email** + +|**avatar_email** + _optional_|The e-mail address of the avatar to use for the application|string |=== diff --git a/modules/api-organization-getOrganizationMember.adoc b/modules/api-organization-getOrganizationMember.adoc index 2d355b143..32a0dc9ec 100644 --- a/modules/api-organization-getOrganizationMember.adoc +++ b/modules/api-organization-getOrganizationMember.adoc @@ -16,10 +16,10 @@ Retrieves the details of a member of the organization. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**membername** + -_required_|The username of the organization member|string |path|**orgname** + _required_|The name of the organization|string +|path|**membername** + +_required_|The username of the organization member|string |=== diff --git a/modules/api-organization-removeOrganizationMember.adoc b/modules/api-organization-removeOrganizationMember.adoc index 75e6b0616..cddbebc45 100644 --- a/modules/api-organization-removeOrganizationMember.adoc +++ b/modules/api-organization-removeOrganizationMember.adoc @@ -17,10 +17,10 @@ Removes a member from an organization, revoking all its repository priviledges a [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**membername** + -_required_|The username of the organization member|string |path|**orgname** + _required_|The name of the organization|string +|path|**membername** + +_required_|The username of the organization member|string |=== diff --git a/modules/api-organization-updateOrganizationApplication.adoc b/modules/api-organization-updateOrganizationApplication.adoc index 5938ddd4a..e88462352 100644 --- a/modules/api-organization-updateOrganizationApplication.adoc +++ b/modules/api-organization-updateOrganizationApplication.adoc @@ -16,9 +16,9 @@ Updates an application under this organization. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**client_id** + +|path|**client_id** + _required_|The OAuth client ID|string -|path|**orgname** + +|path|**orgname** + _required_|The name of the organization|string |=== @@ -31,15 +31,15 @@ Description of an updated application. 
[options="header", width=100%, cols=".^3a,.^9a,.^4a"] |=== |Name|Description|Schema -|**name** + -_optional_|The name of the application|string -|**redirect_uri** + -_optional_|The URI for the application's OAuth redirect|string -|**application_uri** + -_optional_|The URI for the application's homepage|string -|**description** + +|**name** + +_required_|The name of the application|string +|**redirect_uri** + +_required_|The URI for the application's OAuth redirect|string +|**application_uri** + +_required_|The URI for the application's homepage|string +|**description** + _optional_|The human-readable description for the application|string -|**avatar_email** + +|**avatar_email** + _optional_|The e-mail address of the avatar to use for the application|string |=== diff --git a/modules/api-organization-validateProxyCacheConfig.adoc b/modules/api-organization-validateProxyCacheConfig.adoc index a25f45e77..fd8b5abc7 100644 --- a/modules/api-organization-validateProxyCacheConfig.adoc +++ b/modules/api-organization-validateProxyCacheConfig.adoc @@ -5,6 +5,10 @@ [discrete] == POST /api/v1/organization/{orgname}/validateproxycache +[NOTE] +==== +Running `POST /api/v1/organization/{orgname}/validateproxycache` currently returns a `202` response payload of `Valid`. This is a known issue. It should return a `Successful Creation` response payload. This will be fixed in a future version of {productname}. +==== **Authorizations: ** @@ -14,11 +18,12 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + +|path|**orgname** + _required_||string |=== + [discrete] == Request body schema (application/json) @@ -27,7 +32,7 @@ Proxy cache configuration for an organization [options="header", width=100%, cols=".^3a,.^9a,.^4a"] |=== |Name|Description|Schema -|**upstream_registry** + +|**upstream_registry** + _optional_|Name of the upstream registry that is to be cached|string |=== diff --git a/modules/api-permission-changeUserPermissions.adoc b/modules/api-permission-changeUserPermissions.adoc index 0db036092..e987ca443 100644 --- a/modules/api-permission-changeUserPermissions.adoc +++ b/modules/api-permission-changeUserPermissions.adoc @@ -16,10 +16,10 @@ Update the perimssions for an existing repository. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**username** + -_required_|The username of the user to which the permission applies|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**username** + +_required_|The username of the user to which the permission applies|string |=== diff --git a/modules/api-permission-deleteUserPermissions.adoc b/modules/api-permission-deleteUserPermissions.adoc index 984e109b5..810a83d37 100644 --- a/modules/api-permission-deleteUserPermissions.adoc +++ b/modules/api-permission-deleteUserPermissions.adoc @@ -16,10 +16,10 @@ Delete the permission for the user. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**username** + -_required_|The username of the user to which the permission applies|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**username** + +_required_|The username of the user to which the permission applies|string |=== diff --git a/modules/api-permission-getUserPermissions.adoc b/modules/api-permission-getUserPermissions.adoc index 5d2d607c9..f3ec07e6c 100644 --- a/modules/api-permission-getUserPermissions.adoc +++ b/modules/api-permission-getUserPermissions.adoc @@ -16,10 +16,10 @@ Get the permission for the specified user. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**username** + -_required_|The username of the user to which the permission applies|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**username** + +_required_|The username of the user to which the permission applies|string |=== diff --git a/modules/api-permission-getUserTransitivePermission.adoc b/modules/api-permission-getUserTransitivePermission.adoc index 5b8680972..a41420846 100644 --- a/modules/api-permission-getUserTransitivePermission.adoc +++ b/modules/api-permission-getUserTransitivePermission.adoc @@ -16,10 +16,10 @@ Get the fetch the permission for the specified user. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**username** + -_required_|The username of the user to which the permissions apply|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**username** + +_required_|The username of the user to which the permissions apply|string |=== diff --git a/modules/api-prototype-deleteOrganizationPrototypePermission.adoc b/modules/api-prototype-deleteOrganizationPrototypePermission.adoc index 9af601ed6..eba52ea75 100644 --- a/modules/api-prototype-deleteOrganizationPrototypePermission.adoc +++ b/modules/api-prototype-deleteOrganizationPrototypePermission.adoc @@ -16,10 +16,10 @@ Delete an existing permission prototype. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**prototypeid** + -_required_|The ID of the prototype|string |path|**orgname** + _required_|The name of the organization|string +|path|**prototypeid** + +_required_|The ID of the prototype|string |=== diff --git a/modules/api-prototype-updateOrganizationPrototypePermission.adoc b/modules/api-prototype-updateOrganizationPrototypePermission.adoc index 04ce75d05..53507c52c 100644 --- a/modules/api-prototype-updateOrganizationPrototypePermission.adoc +++ b/modules/api-prototype-updateOrganizationPrototypePermission.adoc @@ -16,10 +16,10 @@ Update the role of an existing permission prototype. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**prototypeid** + -_required_|The ID of the prototype|string |path|**orgname** + _required_|The name of the organization|string +|path|**prototypeid** + +_required_|The ID of the prototype|string |=== diff --git a/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc b/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc index 47b3be9a0..5b56fab01 100644 --- a/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc +++ b/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**quota_id** + -_required_||string |path|**namespace** + _required_||string +|path|**quota_id** + +_required_||string |=== diff --git a/modules/api-superuser-changeUserQuotaSuperUser.adoc b/modules/api-superuser-changeUserQuotaSuperUser.adoc index 00bb6bced..d1a856b2d 100644 --- a/modules/api-superuser-changeUserQuotaSuperUser.adoc +++ b/modules/api-superuser-changeUserQuotaSuperUser.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**quota_id** + -_required_||string |path|**namespace** + _required_||string +|path|**quota_id** + +_required_||string |=== diff --git a/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc b/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc index 671635783..6792048a4 100644 --- a/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc +++ b/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**quota_id** + -_required_||string |path|**namespace** + _required_||string +|path|**quota_id** + +_required_||string |=== diff --git a/modules/api-superuser-deleteUserQuotaSuperUser.adoc b/modules/api-superuser-deleteUserQuotaSuperUser.adoc index 45b36f7f6..fbec9ee06 100644 --- a/modules/api-superuser-deleteUserQuotaSuperUser.adoc +++ b/modules/api-superuser-deleteUserQuotaSuperUser.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**quota_id** + -_required_||string |path|**namespace** + _required_||string +|path|**quota_id** + +_required_||string |=== diff --git a/modules/api-superuser-listOrganizationQuotaSuperUser.adoc b/modules/api-superuser-listOrganizationQuotaSuperUser.adoc new file mode 100644 index 000000000..3fd2d5722 --- /dev/null +++ b/modules/api-superuser-listOrganizationQuotaSuperUser.adoc @@ -0,0 +1,35 @@ + += listOrganizationQuotaSuperUser + + +[discrete] +== GET /api/v1/superuser/users/{namespace}/quota + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-superuser-listUserQuotaSuperUser.adoc b/modules/api-superuser-listUserQuotaSuperUser.adoc new file mode 100644 index 000000000..14670c1a2 --- /dev/null +++ 
b/modules/api-superuser-listUserQuotaSuperUser.adoc @@ -0,0 +1,35 @@ + += listUserQuotaSuperUser + + +[discrete] +== GET /api/v1/superuser/organization/{namespace}/quota + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-team-deleteOrganizationTeam.adoc b/modules/api-team-deleteOrganizationTeam.adoc index 7546f4285..2b31d304e 100644 --- a/modules/api-team-deleteOrganizationTeam.adoc +++ b/modules/api-team-deleteOrganizationTeam.adoc @@ -16,10 +16,10 @@ Delete the specified team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**teamname** + -_required_|The name of the team|string |path|**orgname** + _required_|The name of the organization|string +|path|**teamname** + +_required_|The name of the team|string |=== diff --git a/modules/api-team-deleteOrganizationTeamMember.adoc b/modules/api-team-deleteOrganizationTeamMember.adoc index 91b1b1680..e9cbfbaab 100644 --- a/modules/api-team-deleteOrganizationTeamMember.adoc +++ b/modules/api-team-deleteOrganizationTeamMember.adoc @@ -18,12 +18,12 @@ Delete a member of a team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string |path|**membername** + _required_|The username of the team member|string -|path|**orgname** + -_required_|The name of the organization|string |=== diff --git a/modules/api-team-deleteTeamMemberEmailInvite.adoc b/modules/api-team-deleteTeamMemberEmailInvite.adoc new file mode 100644 index 000000000..af324374e --- /dev/null +++ b/modules/api-team-deleteTeamMemberEmailInvite.adoc @@ -0,0 +1,39 @@ + += deleteTeamMemberEmailInvite +Delete an invite of an email address to join a team. + +[discrete] +== DELETE /api/v1/organization/{orgname}/team/{teamname}/invite/{email} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_||string +|path|**teamname** + +_required_||string +|path|**email** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-team-getOrganizationTeamMembers.adoc b/modules/api-team-getOrganizationTeamMembers.adoc index bcd03a3d8..14f5e8e64 100644 --- a/modules/api-team-getOrganizationTeamMembers.adoc +++ b/modules/api-team-getOrganizationTeamMembers.adoc @@ -16,10 +16,10 @@ Retrieve the list of members for the specified team. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**teamname** + -_required_|The name of the team|string |path|**orgname** + _required_|The name of the organization|string +|path|**teamname** + +_required_|The name of the team|string |=== diff --git a/modules/api-team-getOrganizationTeamPermissions.adoc b/modules/api-team-getOrganizationTeamPermissions.adoc index aa3aefc8c..fa782e299 100644 --- a/modules/api-team-getOrganizationTeamPermissions.adoc +++ b/modules/api-team-getOrganizationTeamPermissions.adoc @@ -14,10 +14,10 @@ Returns the list of repository permissions for the org's team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**teamname** + -_required_|The name of the team|string |path|**orgname** + _required_|The name of the organization|string +|path|**teamname** + +_required_|The name of the team|string |=== diff --git a/modules/api-team-inviteTeamMemberEmail.adoc b/modules/api-team-inviteTeamMemberEmail.adoc new file mode 100644 index 000000000..c0f73c997 --- /dev/null +++ b/modules/api-team-inviteTeamMemberEmail.adoc @@ -0,0 +1,39 @@ + += inviteTeamMemberEmail +Invites an email address to an existing team. + +[discrete] +== PUT /api/v1/organization/{orgname}/team/{teamname}/invite/{email} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_||string +|path|**teamname** + +_required_||string +|path|**email** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-team-updateOrganizationTeam.adoc b/modules/api-team-updateOrganizationTeam.adoc index 7d1a8321b..cd6b715c3 100644 --- a/modules/api-team-updateOrganizationTeam.adoc +++ b/modules/api-team-updateOrganizationTeam.adoc @@ -16,10 +16,10 @@ Update the org-wide permission for the specified team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**teamname** + -_required_|The name of the team|string |path|**orgname** + _required_|The name of the organization|string +|path|**teamname** + +_required_|The name of the team|string |=== diff --git a/modules/api-team-updateOrganizationTeamMember.adoc b/modules/api-team-updateOrganizationTeamMember.adoc index daf438667..e77efc676 100644 --- a/modules/api-team-updateOrganizationTeamMember.adoc +++ b/modules/api-team-updateOrganizationTeamMember.adoc @@ -16,12 +16,12 @@ Adds or invites a member to an existing team. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string |path|**membername** + _required_|The username of the team member|string -|path|**orgname** + -_required_|The name of the organization|string |=== diff --git a/modules/api-trigger-activateBuildTrigger.adoc b/modules/api-trigger-activateBuildTrigger.adoc index 0c2397f62..90beb7e33 100644 --- a/modules/api-trigger-activateBuildTrigger.adoc +++ b/modules/api-trigger-activateBuildTrigger.adoc @@ -16,10 +16,10 @@ Activate the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**trigger_uuid** + -_required_|The UUID of the build trigger|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string |=== diff --git a/modules/api-trigger-deleteBuildTrigger.adoc b/modules/api-trigger-deleteBuildTrigger.adoc index 6450458ad..17aad9b57 100644 --- a/modules/api-trigger-deleteBuildTrigger.adoc +++ b/modules/api-trigger-deleteBuildTrigger.adoc @@ -16,10 +16,10 @@ Delete the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**trigger_uuid** + -_required_|The UUID of the build trigger|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string |=== diff --git a/modules/api-trigger-getBuildTrigger.adoc b/modules/api-trigger-getBuildTrigger.adoc index 912be5b2e..cdab3d32e 100644 --- a/modules/api-trigger-getBuildTrigger.adoc +++ b/modules/api-trigger-getBuildTrigger.adoc @@ -16,10 +16,10 @@ Get information for the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**trigger_uuid** + -_required_|The UUID of the build trigger|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string |=== diff --git a/modules/api-trigger-listTriggerRecentBuilds.adoc b/modules/api-trigger-listTriggerRecentBuilds.adoc index 14cb91738..6b4930d2c 100644 --- a/modules/api-trigger-listTriggerRecentBuilds.adoc +++ b/modules/api-trigger-listTriggerRecentBuilds.adoc @@ -16,10 +16,10 @@ List the builds started by the specified trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**trigger_uuid** + -_required_|The UUID of the build trigger|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string |=== diff --git a/modules/api-trigger-manuallyStartBuildTrigger.adoc b/modules/api-trigger-manuallyStartBuildTrigger.adoc index f7ee50898..e65de5f2f 100644 --- a/modules/api-trigger-manuallyStartBuildTrigger.adoc +++ b/modules/api-trigger-manuallyStartBuildTrigger.adoc @@ -16,10 +16,10 @@ Manually start a build from the specified trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**trigger_uuid** + -_required_|The UUID of the build trigger|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string |=== diff --git a/modules/api-trigger-updateBuildTrigger.adoc b/modules/api-trigger-updateBuildTrigger.adoc index 1ace1e253..a161b524a 100644 --- a/modules/api-trigger-updateBuildTrigger.adoc +++ b/modules/api-trigger-updateBuildTrigger.adoc @@ -16,10 +16,10 @@ Updates the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**trigger_uuid** + -_required_|The UUID of the build trigger|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string |=== diff --git a/modules/arch-intro-access-control.adoc b/modules/arch-intro-access-control.adoc new file mode 100644 index 000000000..408ded38e --- /dev/null +++ b/modules/arch-intro-access-control.adoc @@ -0,0 +1,4 @@ +[[arch-intro-access-control]] += Access control + +{productname} provides both role-based access control (RBAC) and fine-grained access control, and has team features that allow for limited access control of repositories, organizations, and user privileges. {productname} access control features also provide support for dispersed organizations. \ No newline at end of file diff --git a/modules/arch-intro-build-automation.adoc b/modules/arch-intro-build-automation.adoc new file mode 100644 index 000000000..00e2de9f9 --- /dev/null +++ b/modules/arch-intro-build-automation.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="arch-intro-build-automation"] += Build automation + +{productname} supports building Dockerfiles using a set of worker nodes on {ocp} or Kubernetes platforms. Build triggers, such as GitHub webhooks, can be configured to automatically build new versions of your repositories when new code is committed. + +Prior to {productname} 3.7, {productname} ran Podman commands in virtual machines launched by pods. Running builds on virtual platforms requires enabling nested virtualization, which is not featured in {rhel} or {ocp}. As a result, builds had to run on bare metal clusters, which is an inefficient use of resources. With {productname} 3.7, this requirement was removed and builds could be run on {ocp} clusters running on virtualized or bare metal platforms. \ No newline at end of file diff --git a/modules/arch-intro-content-distribution.adoc b/modules/arch-intro-content-distribution.adoc new file mode 100644 index 000000000..33650b98b --- /dev/null +++ b/modules/arch-intro-content-distribution.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="arch-intro-content-distribution"] += Content distribution + +Content distribution features in {productname} include the following: + +Repository mirroring:: {productname} repository mirroring lets you mirror images from {productname} and other container registries, like JFrog Artifactory, Harbor, or Sonatype Nexus Repository, into your {productname} cluster. Using repository mirroring, you can synchronize images to {productname} based on repository names and tags. + +Geo-replication:: {productname} geo-replication allows multiple, geographically distributed {productname} deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed {productname} setup. Image data is asynchronously replicated in the background with transparent failover and redirection for clients. 
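As a rough sketch of how a single site is wired for geo-replication in `config.yaml` (the field names are real {productname} configuration fields; the location IDs, buckets, hosts, and credentials below are hypothetical):

[source,yaml]
----
# One site's view: it prefers its local storage location; pushes are
# replicated to the other default locations in the background.
FEATURE_STORAGE_REPLICATION: true
DISTRIBUTED_STORAGE_CONFIG:
  usstorage:                           # hypothetical location ID
    - S3Storage
    - host: s3.us-east-1.amazonaws.com
      s3_bucket: quay-us               # hypothetical bucket
      storage_path: /registry
      s3_access_key: <access_key>
      s3_secret_key: <secret_key>
  eustorage:                           # hypothetical location ID
    - S3Storage
    - host: s3.eu-west-1.amazonaws.com
      s3_bucket: quay-eu               # hypothetical bucket
      storage_path: /registry
      s3_access_key: <access_key>
      s3_secret_key: <secret_key>
DISTRIBUTED_STORAGE_PREFERENCE:
  - usstorage
  - eustorage
DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS:
  - usstorage
  - eustorage
----

In this sketch, each site runs the same storage definitions but lists its own location first in `DISTRIBUTED_STORAGE_PREFERENCE`, so pulls are served locally while writes replicate to every location in `DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS`.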
+ +Deployment in disconnected or air-gapped environments:: {productname} is deployable in a disconnected environment in one of two ways: ++ +* {productname} and Clair connected to the internet, with an air-gapped {ocp} cluster accessing the {productname} registry through an explicit, allowlisted hole in the firewall. +* Using two independent {productname} and Clair installations. One installation is connected to the internet and another within a disconnected, or firewalled, environment. Image and vulnerability data is manually transferred from the connected environment to the disconnected environment using offline media. \ No newline at end of file diff --git a/modules/arch-intro-integration.adoc b/modules/arch-intro-integration.adoc new file mode 100644 index 000000000..24026c89a --- /dev/null +++ b/modules/arch-intro-integration.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="arch-intro-integration"] += Integration + +{productname} can integrate with almost all Git-compatible systems. {productname} offers automated configuration for GitHub, GitLab, or BitBucket, which allows users to continuously build and serve their containerized software. + +[id="arch-rest-api"] +== REST API + +{productname} provides a full OAuth 2, RESTful API that offers the following benefits: + +* Availability from endpoints of each {productname} instance, for example, `\https://quay-server.example.com/api/v1` +* The ability to connect to endpoints through a browser to `GET`, `DELETE`, `POST`, and `PUT` {productname} settings by using a discovery endpoint that is usable by Swagger. +* The API can be invoked by the URL, for example, `\https://quay-server.example.com/api/v1`, and uses JSON objects as payload. \ No newline at end of file diff --git a/modules/arch-core-intro.adoc b/modules/arch-intro-other-features.adoc similarity index 65% rename from modules/arch-core-intro.adoc rename to modules/arch-intro-other-features.adoc index 9bc4270ee..12e694d56 100644 --- a/modules/arch-core-intro.adoc +++ b/modules/arch-intro-other-features.adoc @@ -1,11 +1,8 @@ -[[arch-core-intro]] -= Core functionality +[[arch-intro-other-features]] += Other features - - -* High availability * Full standards / spec support (Docker v2-2) * Long-term protocol support * OCI compatibility through test suite compliance * Enterprise grade support -* Regular updates +* Regular updates \ No newline at end of file diff --git a/modules/arch-intro-recent-features.adoc b/modules/arch-intro-recent-features.adoc new file mode 100644 index 000000000..cfba552d1 --- /dev/null +++ b/modules/arch-intro-recent-features.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="arch-intro-recent-features"] += Recently added features + +See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/red_hat_quay_release_notes/index[{productname} Release Notes] for information about the latest features, enhancements, deprecations, and known issues. \ No newline at end of file diff --git a/modules/arch-intro-scalability.adoc b/modules/arch-intro-scalability.adoc new file mode 100644 index 000000000..724b2b078 --- /dev/null +++ b/modules/arch-intro-scalability.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="arch-intro-scalability"] += Scalability and high availability (HA) + +The code base used for {productname} is the same as the code base used for link:https://quay.io[Quay.io], which is the highly available container image registry hosted by Red Hat.
Quay.io and {productname} offer a multitenant SaaS solution. As a result, users can be confident that their deployment can deliver at scale with high availability, whether their deployment is on-prem or on a public cloud. \ No newline at end of file diff --git a/modules/arch-intro-security.adoc b/modules/arch-intro-security.adoc new file mode 100644 index 000000000..735c5988d --- /dev/null +++ b/modules/arch-intro-security.adoc @@ -0,0 +1,41 @@ +:_content-type: CONCEPT +[id="arch-intro-security"] += Security + +{productname} is built for real enterprise use cases where content governance and security are two major focus areas. + +{productname} content governance and security include built-in vulnerability scanning through Clair. + +[id="arch-tls-ssl-config"] +== TLS/SSL configuration + +You can configure SSL/TLS for the {productname} registry in the configuration tool UI or in the configuration bundle. SSL/TLS connections to the database, to image storage, and to Redis can also be specified through the configuration tool. + +Sensitive fields in the database and at run time are automatically encrypted. You can also require HTTPS and verify certificates for the {productname} registry during mirror operations. + +[id="arch-intro-clair"] +== Clair + +Clair is an open source application that leverages static analysis to parse image content and report vulnerabilities affecting that content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. + +[id="arch-operator-security"] +== {productname} Operator security + +When {productname} is deployed using the {productname} Operator, the `tls` component is set to `managed` by default, and {ocp}'s Certificate Authority is used to create HTTPS endpoints and to rotate TLS certificates. + +If you set the `tls` component to `unmanaged`, you can provide custom certificates to the pass-through Routes; however, you are responsible for certificate rotation. + +[id="arch-builders"] +== Fully isolated builds + +{productname} now supports building Dockerfiles using both bare-metal and virtual builders. + +When bare-metal worker nodes are used, each build runs in an ephemeral virtual machine to ensure isolation and security while the build is running. This provides the best protection against rogue payloads. + +Running builds directly in a container does not have the same isolation as when using virtual machines, but it still provides good protection. + + +[id="arch-rbac"] +== Role-based access controls + +{productname} provides full isolation of registry content by organization and team with fine-grained entitlements for read, write, and administrative access by users and automated tools. diff --git a/modules/arch-intro.adoc b/modules/arch-intro.adoc index 38e4a37b2..ca2445f31 100644 --- a/modules/arch-intro.adoc +++ b/modules/arch-intro.adoc @@ -1,19 +1,11 @@ -[[arch-intro]] -= {productname} features +:_content-type: CONCEPT +[id="arch-intro"] += {productname} overview +{productname} is a distributed and highly available container image registry for your enterprise. -{productname} is a trusted, open source container registry platform that runs everywhere, but runs best on Red Hat OpenShift. It scales without limits, from a developer laptop to a container host or Kubernetes, and can be deployed on-premise or on public cloud.
It provides global governance and security controls, with features including image vulnerability scanning, access controls, geo-replication and repository mirroring. - +The {productname} container registry platform provides secure storage, distribution, access controls, geo-replication, repository mirroring, and governance of containers and cloud-native artifacts on any infrastructure. It is available as a standalone component or as an Operator for {ocp}, and is deployable on-prem or on a public cloud. image:178_Quay_architecture_0821_features.png[Quay features] -This guide provides an insight into architectural patterns to use when deploying {productname}. It contains sizing guidance and deployment prerequisites, along with best practices for ensuring high availability for your {productname} registry. - - -* xref:arch-core-intro[Core functionality] -* xref:security-intro[Security] -* xref:content-distrib-intro[Content distribution] -* xref:access-control-intro[Access control] -* xref:build-automation-intro[Build automation] -* xref:scalability-intro[Scalability] -* xref:integration-intro[Integration] +This guide provides insight into architectural patterns to use when deploying {productname}. It also offers sizing guidance and deployment prerequisites, along with best practices for ensuring high availability for your {productname} registry. diff --git a/modules/arch-mirror-registry.adoc b/modules/arch-mirror-registry.adoc new file mode 100644 index 000000000..46b58627f --- /dev/null +++ b/modules/arch-mirror-registry.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="arch-mirror-registry"] += Mirror registry for Red Hat OpenShift + +The _mirror registry for Red Hat OpenShift_ is a small-scale version of {productname} that you can use as a target for mirroring the required container images of {ocp} for disconnected installations. + +For disconnected deployments of {ocp}, a container registry is required to carry out the installation of the clusters. To run a production-grade registry service on such a cluster, you must create a separate registry deployment to install the first cluster. The _mirror registry for Red Hat OpenShift_ addresses this need and is included in every {ocp} subscription. It is available for download on the link:https://console.redhat.com/openshift/downloads#tool-mirror-registry[OpenShift console *Downloads*] page. + +The _mirror registry for Red Hat OpenShift_ allows users to install a small-scale version of {productname} and its required components using the `mirror-registry` command line interface (CLI) tool. The _mirror registry for Red Hat OpenShift_ is deployed automatically with pre-configured local storage and a local database. It also includes auto-generated user credentials and access permissions with a single set of inputs and no additional configuration choices to get started. + +The _mirror registry for Red Hat OpenShift_ provides a pre-determined network configuration and reports deployed component credentials and access URLs upon success. A limited set of optional configuration inputs like fully qualified domain name (FQDN) services, superuser name and password, and custom TLS certificates are also provided. This provides users with a container registry so that they can easily create an offline mirror of all {ocp} release content when running {ocp} in restricted network environments.
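As a usage sketch, installing the _mirror registry for Red Hat OpenShift_ with the `mirror-registry` CLI tool can be as simple as the following; the hostname and installation directory are placeholder values, and the full option set depends on your version of the tool:

[source,terminal]
----
# Run from the directory where the mirror-registry tarball was unpacked.
$ sudo ./mirror-registry install \
  --quayHostname mirror-registry.example.com \
  --quayRoot /opt/mirror-registry
----

On success, the installer reports the auto-generated credentials and the access URL for the new registry.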
+ +The _mirror registry for Red Hat OpenShift_ is limited to hosting images that are required to install a disconnected {ocp} cluster, such as release images or Operator images. It uses local storage. Content built by customers should not be hosted by the _mirror registry for Red Hat OpenShift_. + +Unlike {productname}, the _mirror registry for Red Hat OpenShift_ is not a highly-available registry. Only local file system storage is supported. Using the _mirror registry for Red Hat OpenShift_ with more than one cluster is discouraged, because multiple clusters can create a single point of failure when updating your cluster fleet. It is advised to use the _mirror registry for Red Hat OpenShift_ to install a cluster that can host a production-grade, highly available registry such as {productname}, which can serve {ocp} content to other clusters. + +More information is available at link:https://docs.openshift.com/container-platform/4.10/installing/disconnected_install/installing-mirroring-creating-registry.html[Creating a mirror registry with _mirror registry for Red Hat OpenShift_]. \ No newline at end of file diff --git a/modules/arch-prereqs.adoc b/modules/arch-prereqs.adoc new file mode 100644 index 000000000..c3e5ac0d4 --- /dev/null +++ b/modules/arch-prereqs.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="arch-prereqs"] += {productname} prerequisites + +Before deploying {productname}, you must provision image storage, a database, and Redis. diff --git a/modules/attributes.adoc b/modules/attributes.adoc index 42b8fadc3..c3d2150ca 100644 --- a/modules/attributes.adoc +++ b/modules/attributes.adoc @@ -1,32 +1,41 @@ :productname: Red Hat Quay :productshortname: Quay :imagesdir: ../images +:ocp: OpenShift Container Platform +:qbo: Quay Bridge Operator +:rhel: Red Hat Enterprise Linux (RHEL) +:rhel-short: RHEL ifeval::["{productname}" == "Project Quay"] :upstream: :productname: Project Quay -:productversion: qui-gon -:productmin: qui-gon -:productminv: qui-gon +:productversion: 3 +:producty: 3.9 +:productminv: v3.9.0 :productrepo: quay.io/projectquay -:clairnewver: qui-gon :quayimage: quay :clairimage: clair +:clairproductminv: 4.6.0 :builderimage: quay-builder -:builderqemuimage: quay-builder-qemu-fedoracoreos +:builderqemuimage: quay-builder-qemu:main +:postgresimage: centos/postgresql-10-centos7@sha256:de1560cb35e5ec643e7b3a772ebaac8e3a7a2a8e8271d9e91ff023539b4dfb33 +:redisimage: centos/redis-32-centos7@sha256:06dbb609484330ec6be6090109f1fa16e936afcf975d1cbc5fff3e6c7cae7542 endif::[] ifeval::["{productname}" == "Red Hat Quay"] :downstream: :productname: Red Hat Quay :productversion: 3 -:producty: 3.7 -:productmin: 3.7.0 -:productminv: v3.7.0 +:producty: 3.9 +:productmin: 3.9.0 +:productminv: v3.9.0 :productrepo: registry.redhat.io/quay -:clairnewver: v3.7.0 +:clairnewver: v3.8 :quayimage: quay-rhel8 :clairimage: clair-rhel8 +:clairproductminv: 4.6.0 :builderimage: quay-builder-rhel8 :builderqemuimage: quay-builder-qemu-rhcos +:postgresimage: registry.redhat.io/rhel8/postgresql-13:1-109 +:redisimage: registry.redhat.io/rhel8/redis-6:1-110 endif::[] diff --git a/modules/backing-up-and-restoring-intro.adoc b/modules/backing-up-and-restoring-intro.adoc new file mode 100644 index 000000000..ff9b30ba7 --- /dev/null +++ b/modules/backing-up-and-restoring-intro.adoc @@ -0,0 +1,4 @@ +[[backing-up-and-restoring-intro]] += Backing up and restoring {productname} managed by the {productname} Operator + +Use the content within this section to back up and restore {productname} when managed by the 
{productname} Operator on OpenShift Container Platform. diff --git a/modules/backing-up-red-hat-quay-operator.adoc b/modules/backing-up-red-hat-quay-operator.adoc new file mode 100644 index 000000000..2ef80332f --- /dev/null +++ b/modules/backing-up-red-hat-quay-operator.adoc @@ -0,0 +1,292 @@ +[[backing-up-red-hat-quay-operator]] += Backing up {productname} + +This procedure describes how to create a backup of {productname} deployed on OpenShift Container Platform using the {productname} Operator. + +.Prerequisites + +* A healthy {productname} deployment on OpenShift Container Platform using the {productname} Operator (status condition `Available` is set to `true`) +* The components `quay`, `postgres`, and `objectstorage` are set to `managed: true` +* If the component `clair` is set to `managed: true`, the component `clairpostgres` is also set to `managed: true` (starting with {productname} Operator v3.7 or later) + +[NOTE] +==== +If your deployment contains partially unmanaged database or storage components and you are using external services for Postgres or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to create a backup of the data. +You can refer to the tools described in this guide as a starting point on how to back up your external Postgres database or object storage. +==== + +== {productname} configuration backup + + +. Back up the `QuayRegistry` custom resource by exporting it: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml > quay-registry.yaml +---- + +. Edit the resulting `quayregistry.yaml` and remove the status section and the following metadata fields: ++ +[source,yaml] +---- + metadata.creationTimestamp + metadata.finalizers + metadata.generation + metadata.resourceVersion + metadata.uid +---- + +. Back up the managed keys secret: ++ +[NOTE] +==== +If you are running a version older than {productname} 3.7.0, this step can be skipped. Some secrets are automatically generated while deploying Quay for the first time. These are stored in a secret called `-quay-registry-managed-secret-keys` in the namespace of the `QuayRegistry` resource. +==== ++ +[source,terminal] +---- +$ oc get secret -n -quay-registry-managed-secret-keys -o yaml > managed-secret-keys.yaml +---- + +. Edit the resulting `managed-secret-keys.yaml` file and remove the entry `metadata.ownerReferences`. Your `managed-secret-keys.yaml` file should look similar to the following: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: -quay-registry-managed-secret-keys + namespace: +data: + CONFIG_EDITOR_PW: + DATABASE_SECRET_KEY: + DB_ROOT_PW: + DB_URI: + SECRET_KEY: + SECURITY_SCANNER_V4_PSK: +---- ++ +All information under the `data` property should remain the same. + +. Back up the current Quay configuration: ++ +[source,terminal] +---- +$ oc get secret -n $(oc get quayregistry -n -o jsonpath='{.spec.configBundleSecret}') -o yaml > config-bundle.yaml +---- + +. Back up the `/conf/stack/config.yaml` file mounted inside of the Quay pods: ++ +[source,terminal] +---- +$ oc exec -it quay-pod-name -- cat /conf/stack/config.yaml > quay-config.yaml +---- + +== Scale down your {productname} deployment + +[IMPORTANT] +==== +This step is needed to create a consistent backup of the state of your {productname} deployment. Do not omit this step, including in setups where Postgres databases and/or S3-compatible object storage are provided by external services (unmanaged by the Operator). +==== + .
*For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for {productname}, mirror workers, and Clair (if managed). Your `QuayRegistry` resource should look similar to the following: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: false <1> + - kind: quay + managed: true + overrides: <2> + replicas: 0 + - kind: clair + managed: true + overrides: + replicas: 0 + - kind: mirror + managed: true + overrides: + replicas: 0 + … +---- +<1> Disables auto scaling of the Quay, Clair, and mirror workers +<2> Sets the replica count to 0 for components that access the database and object storage + +. *For Operator version 3.6 and earlier:* Scale down the {productname} deployment by scaling down the {productname} Operator first and then the managed {productname} resources: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/^quay-operator/ {print $1}') -n +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-app/ {print $1}') -n +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-mirror/ {print $1}') -n +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/clair-app/ {print $1}') -n +---- + +. Wait for the `registry-quay-app`, `registry-quay-mirror`, and `registry-clair-app` pods (depending on which components you set to be managed by the {productname} Operator) to disappear. You can check their status by running the following command: ++ +[source,terminal] +---- +$ oc get pods -n +---- ++ +Example output: ++ +[source,terminal] +---- +$ oc get pod + +quay-operator.v3.7.1-6f9d859bd-p5ftc 1/1 Running 0 12m +quayregistry-clair-postgres-7487f5bd86-xnxpr 1/1 Running 1 (12m ago) 12m +quayregistry-quay-app-upgrade-xq2v6 0/1 Completed 0 12m +quayregistry-quay-config-editor-6dfdcfc44f-hlvwm 1/1 Running 0 73s +quayregistry-quay-database-859d5445ff-cqthr 1/1 Running 0 12m +quayregistry-quay-redis-84f888776f-hhgms 1/1 Running 0 12m +---- + +== {productname} managed database backup + +[NOTE] +==== +If your {productname} deployment is configured with external (unmanaged) Postgres databases, refer to your vendor's documentation on how to create a consistent backup of these databases. +==== + +. Identify the Quay PostgreSQL pod name: ++ +[source,terminal] +---- +$ oc get pod -l quay-component=postgres -n -o jsonpath='{.items[0].metadata.name}' +---- ++ +Example output: ++ +[source,terminal] +---- +quayregistry-quay-database-59f54bb7-58xs7 +---- + +. Obtain the Quay database name: ++ +[source,terminal] +---- +$ oc -n rsh $(oc get pod -l app=quay -o NAME -n |head -n 1) cat /conf/stack/config.yaml|awk -F"/" '/^DB_URI/ {print $4}' +quayregistry-quay-database +---- + .
Download a backup database: ++ +[source,terminal] +---- +$ oc exec quayregistry-quay-database-59f54bb7-58xs7 -- /usr/bin/pg_dump -C quayregistry-quay-database > backup.sql +---- + +== {productname} managed object storage backup + +The instructions in this section apply to the following configurations: + +* Standalone, multi-cloud object gateway configurations +* OpenShift Data Foundations storage, which requires that the {productname} Operator provisioned an S3 object storage bucket through the ObjectStorageBucketClaim API + +[NOTE] +==== +If your {productname} deployment is configured with external (unmanaged) object storage, refer to your vendor's documentation on how to create a copy of the content of Quay's storage bucket. +==== + +. Decode and export the `AWS_ACCESS_KEY_ID`: ++ +[source,terminal] +---- +$ export AWS_ACCESS_KEY_ID=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_ACCESS_KEY_ID}' |base64 -d) +---- + +. Decode and export the `AWS_SECRET_ACCESS_KEY`: ++ +[source,terminal] +---- +$ export AWS_SECRET_ACCESS_KEY=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_SECRET_ACCESS_KEY}' |base64 -d) +---- + +. Create a new directory and copy all blobs to it: ++ +[source,terminal] +---- +$ mkdir blobs + +$ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift-storage -o jsonpath='{.spec.host}') s3://$(oc get cm -l app=noobaa -n -o jsonpath='{.items[0].data.BUCKET_NAME}') ./blobs +---- + +[NOTE] +==== +You can also use link:https://rclone.org/[rclone] or link:https://s3tools.org/s3cmd[s3cmd] instead of the AWS command line utility. +==== + +== Scale the {productname} deployment back up + +. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers, and Clair, as applicable. Your `QuayRegistry` resource should look similar to the following: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + …
+status:
+  - lastTransitionTime: '2022-06-20T05:31:17Z'
+    lastUpdateTime: '2022-06-20T17:31:13Z'
+    message: All components reporting as healthy
+    reason: HealthChecksPassing
+    status: 'True'
+    type: Available
+----
diff --git a/modules/backing-up-red-hat-quay-standalone.adoc b/modules/backing-up-red-hat-quay-standalone.adoc
new file mode 100644
index 000000000..b9061e5c8
--- /dev/null
+++ b/modules/backing-up-red-hat-quay-standalone.adoc
@@ -0,0 +1,137 @@
+:_content-type: PROCEDURE
+[[backing-up-red-hat-quay-standalone]]
+= Backing up {productname} on standalone deployments
+
+This procedure describes how to create a backup of {productname} on standalone deployments.
+
+.Prerequisites
+
+.Procedure
+
+. Create a temporary backup directory, for example, `quay-backup`:
++
+[source,terminal]
+----
+$ mkdir /tmp/quay-backup
+----
+
+. The following example command shows the local directory that the {productname} container was started in, for example, `/opt/quay-install`:
++
+[subs="verbatim,attributes"]
+----
+$ podman run --name quay-app \
+   -v /opt/quay-install/config:/conf/stack:Z \
+   -v /opt/quay-install/storage:/datastorage:Z \
+   {productrepo}/{quayimage}:{productminv}
+----
++
+Change into the directory that bind-mounts to `/conf/stack` inside of the container, for example, `/opt/quay-install`, by running the following command:
++
+[source,terminal]
+----
+$ cd /opt/quay-install
+----
+
+. Compress the contents of your {productname} deployment into an archive in the `quay-backup` directory by entering the following command:
++
+[source,terminal]
+----
+$ tar cvf /tmp/quay-backup/quay-backup.tar.gz *
+----
++
+Example output:
++
+[source,terminal]
+----
+config.yaml
+config.yaml.bak
+extra_ca_certs/
+extra_ca_certs/ca.crt
+ssl.cert
+ssl.key
+----
+
+. Back up the Quay container service by entering the following command:
++
+[subs="verbatim,attributes"]
+----
+$ podman inspect quay-app | jq -r '.[0].Config.CreateCommand | .[]' | paste -s -d ' ' -
+
+  /usr/bin/podman run --name quay-app \
+  -v /opt/quay-install/config:/conf/stack:Z \
+  -v /opt/quay-install/storage:/datastorage:Z \
+  {productrepo}/{quayimage}:{productminv}
+----
+
+. Redirect the contents of your `conf/stack/config.yaml` file to your temporary `quay-config.yaml` file by entering the following command:
++
+[source,terminal]
+----
+$ podman exec -it quay-app cat /conf/stack/config.yaml > /tmp/quay-backup/quay-config.yaml
+----
+
+. Obtain the `DB_URI` located in your temporary `quay-config.yaml` file by entering the following command:
++
+[source,terminal]
+----
+$ grep DB_URI /tmp/quay-backup/quay-config.yaml
+----
++
+Example output:
++
+----
+postgresql://:test123@172.24.10.50/quay
+----
+
+. Extract the PostgreSQL contents to your temporary backup directory in a backup `.sql` file by entering the following command:
++
+[source,terminal]
+----
+$ pg_dump -h 172.24.10.50 -p 5432 -d quay -U  -W -O > /tmp/quay-backup/quay-backup.sql
+----
+
+. Print the contents of your `DISTRIBUTED_STORAGE_CONFIG`. For example:
++
+[source,yaml]
+----
+DISTRIBUTED_STORAGE_CONFIG:
+   default:
+    - S3Storage
+    - s3_bucket:
+      storage_path: /registry
+      s3_access_key:
+      s3_secret_key:
+      host:
+----
+
+. Export the `AWS_ACCESS_KEY_ID` by using the `access_key` credential obtained in Step 7:
++
+[source,terminal]
+----
+$ export AWS_ACCESS_KEY_ID=
+----
+
+. Export the `AWS_SECRET_ACCESS_KEY` by using the `secret_key` obtained in Step 7:
++
+[source,terminal]
+----
+$ export AWS_SECRET_ACCESS_KEY=
+----
+
+. Sync the `quay` bucket to the `/tmp/quay-backup/blob-backup/` directory from the `hostname` of your `DISTRIBUTED_STORAGE_CONFIG`:
++
+[source,terminal]
+----
+$ aws s3 sync s3:// /tmp/quay-backup/blob-backup/ --source-region us-east-2
+----
++
+Example output:
++
+----
+download: s3:///registry/sha256/9c/9c3181779a868e09698b567a3c42f3744584ddb1398efe2c4ba569a99b823f7a to registry/sha256/9c/9c3181779a868e09698b567a3c42f3744584ddb1398efe2c4ba569a99b823f7a
+download: s3:///registry/sha256/e9/e9c5463f15f0fd62df3898b36ace8d15386a6813ffb470f332698ecb34af5b0d to registry/sha256/e9/e9c5463f15f0fd62df3898b36ace8d15386a6813ffb470f332698ecb34af5b0d
+----
+[NOTE]
+====
+It is recommended that you delete the `quay-config.yaml` file after syncing the `quay` bucket because it contains sensitive information. The `quay-config.yaml` file will not be lost because it is backed up in the `quay-backup.tar.gz` file.
+====
diff --git a/modules/branding-quay-deployment.adoc b/modules/branding-quay-deployment.adoc
new file mode 100644
index 000000000..dbfb04f48
--- /dev/null
+++ b/modules/branding-quay-deployment.adoc
@@ -0,0 +1,34 @@
+:_content-type: PROCEDURE
+[id="branding-quay-deployment"]
+= Branding a {productname} deployment on the legacy UI
+
+You can brand the UI of your {productname} deployment by changing the registry title, logo, and footer image, and by directing users to a website embedded in the footer image.
+
+.Procedure
+
+. Update your {productname} `config.yaml` file to add the following parameters:
++
+[source,yaml]
+----
+BRANDING:
+    logo: <1>
+    footer_img: <2>
+    footer_url: <3>
+REGISTRY_TITLE: <4>
+REGISTRY_TITLE_SHORT: <5>
+----
+<1> The URL of the image that will appear at the top of your {productname} deployment.
+<2> The URL of the image that will appear at the bottom of your {productname} deployment.
+<3> The URL of the website that users will be directed to when clicking the footer image.
+<4> The long-form title for the registry. This is displayed on the frontend of your {productname} deployment, for example, on the sign-in page of your organization.
+<5> The short-form title for the registry. The title is displayed on various pages of your organization, for example, as the title of the tutorial on your organization's *Tutorial* page.
+
+. Restart your {productname} deployment. After restarting, your {productname} deployment is updated with a new logo, footer image, and footer image URL.
+
+////
+
+[role="_additional-resources"]
+== Additional resources
+
+*
\ No newline at end of file
diff --git a/modules/build-limitations.adoc b/modules/build-limitations.adoc
index 8568f22b0..57e611fb9 100644
--- a/modules/build-limitations.adoc
+++ b/modules/build-limitations.adoc
@@ -3,4 +3,4 @@
 Running builds in {productname} in an unprivileged context might cause some commands that were working under the previous build strategy to fail. Attempts to change the build strategy could potentially cause performance and reliability issues with the build.
 
-Running builds direclty in a container will not have the same isolation as using virtual machines. Changing the build environment might also caused builds that were previously working to fail.
+Running builds directly in a container will not have the same isolation as using virtual machines. Changing the build environment might also cause builds that were previously working to fail.
diff --git a/modules/builders-virtual-environment.adoc b/modules/builders-virtual-environment.adoc
index 5edb84071..73a33aceb 100644
--- a/modules/builders-virtual-environment.adoc
+++ b/modules/builders-virtual-environment.adoc
@@ -1,34 +1,39 @@
-[[setting-up-builders]]
-= Creating a {productname} builders environment with OpenShift
+:_content-type: CONCEPT
+[id="builders-virtual-environment"]
+= Creating a {productname} builders environment with {ocp}
 
-== OpenShift TLS component
+The procedures in this section explain how to create a {productname} virtual builders environment with {ocp}.
 
-The {productname} 3.6 Operator has introduced the `tls` component which allows you to control TLS configuration.
+[id="openshift-tls-component"]
+== {ocp} TLS component
+
+The `tls` component allows you to control TLS configuration.
 
 [NOTE]
 ====
-{productname} 3.6 does not support builders when the TLS component is managed by the Operator.
+{productname} {producty} does not support builders when the TLS component is managed by the Operator.
 ====
 
-If you set `tls` to `unmanaged`, you supply your own `ssl.cert` and `ssl.key` files. In this instance, if you want your cluster to support builders, you must add both the Quay route and the builder route name to the SAN list in the cert, or alternatively use a wildcard. To add the builder route, use the following format:
+If you set `tls` to `unmanaged`, you supply your own `ssl.cert` and `ssl.key` files. In this instance, if you want your cluster to support builders, you must add both the Quay route and the builder route name to the SAN list in the cert, or use a wildcard.
+
+To add the builder route, use the following format:
 
 [source,bash]
 ----
 [quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name]:443
 ----
 
-[[red-hat-quay-quota-builders-establishment]]
-== Using OpenShift Container Platform for {productname} builders
+[id="red-hat-quay-quota-builders-establishment"]
+== Using {ocp} for {productname} builders
 
-The following procedure describes how you can implement the builders feature in {productname}.
+Builders require SSL/TLS certificates. For more information about SSL/TLS certificates, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#using_ssl_to_protect_connections_to_red_hat_quay[Adding TLS certificates to the {productname} container].
 
-.Prerequisites
+If you are using Amazon Web Services (AWS) S3 storage, you must modify your storage bucket in the AWS console, prior to running builders. See "Modifying your AWS S3 storage bucket" in the following section for the required parameters.
 
-* Builders require SSL certificates. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#using_ssl_to_protect_connections_to_red_hat_quay[Adding TLS certificates to the {productname} container].
+[id="red-hat-quay-setting-up-builders"]
+=== Preparing {ocp} for virtual builders
 
-* If you are using AWS S3 storage, you must modify your storage bucket in the AWS console, prior to running builders. See "Modifying your AWS S3 storage bucket" in the following section for the required parameters.
-
-.Procedure
+Use the following procedure to prepare {ocp} for {productname} virtual builders.
 [NOTE]
 ====
@@ -36,21 +41,18 @@ The following procedure describes how you can implement the builders feature in
 * This procedure is for setting up a virtual namespace on OpenShift Container Platform.
 ====
 
+.Procedure
+. Log in to your {productname} cluster using a cluster administrator account.
 
-[[red-hat-quay-setting-up-builders]]
-=== Preparing OpenShift Container Platform for virtual builders
-
-. Log in to your {productname} cluster using a cluster admin account.
-
-. Create a new project where your virtual builders will be run (e.g., `virtual-builders`).
+. Create a new project where your virtual builders will be run, for example, `virtual-builders`, by running the following command:
 +
 [source,terminal]
 ----
 $ oc new-project virtual-builders
 ----
 
-. Create a `ServiceAccount` in this `Project` that will be used to run builds.
+. Create a `ServiceAccount` in the project that will be used to run builds by entering the following command:
 +
 [source,terminal]
 ----
@@ -64,7 +66,7 @@ $ oc create sa -n virtual-builders quay-builder
 
 $ oc adm policy -n virtual-builders add-role-to-user edit system:serviceaccount:virtual-builders:quay-builder
 ----
 
-. Grant the Quay builder `anyuid scc` permissions:
+. Grant the Quay builder `anyuid scc` permissions by entering the following command:
 +
 [source,terminal]
 ----
@@ -76,28 +78,36 @@ $ oc adm policy -n virtual-builders add-scc-to-user anyuid -z quay-builder
 This action requires cluster admin privileges. This is required because builders must run as the Podman user for unprivileged or rootless builds to work.
 ====
 
-. Obtain the token for the Quay builder service account:
+. Obtain the token for the Quay builder service account.
+
+.. If using {ocp} 4.10 or an earlier version, enter the following command:
++
+[source,terminal]
+----
+$ oc sa get-token -n virtual-builders quay-builder
+----
+.. If using {ocp} 4.11 or later, enter the following command:
 +
 [source,terminal]
 ----
-$ oc sa get-token -n virtual-builders quay-builder
+$ oc create token quay-builder -n virtual-builders
 ----
 +
-.Sample output
+.Example output
 [source,terminal]
 ----
 eyJhbGciOiJSUzI1NiIsImtpZCI6IldfQUJkaDVmb3ltTHZ0dGZMYjhIWnYxZTQzN2dJVEJxcDJscldSdEUtYWsifQ...
 ----
 
-. Determine the builder route:
+. Determine the builder route by entering the following command:
 +
 [source,terminal]
 ----
 $ oc get route -n quay-enterprise
 ----
 +
-.Sample output
+.Example output
 [source,terminal]
 ----
 NAME   HOST/PORT   PATH   SERVICES   PORT   TERMINATION   WILDCARD
@@ -106,17 +116,27 @@ example-registry-quay-builder    example-registry-quay-builder-quay-enterpr
 ...
 ----
 
-. Generate a self-signed SSL certificate with the .crt extension:
+. Generate a self-signed SSL/TLS certificate with the .crt extension by entering the following command:
 +
+[source,terminal]
 ----
-$ SECRET=$(oc get sa openshift-apiserver-sa --namespace=openshift-apiserver -o json | jq -r '.secrets[] | select(.name | contains("openshift-apiserver-sa-token"))'.name)
+$ oc extract cm/kube-root-ca.crt -n openshift-apiserver
 ----
 +
+.Example output
+[source,terminal]
 ----
-$ oc get secret $SECRET -n openshift-apiserver -o json | jq '.data."ca.crt"' -r | base64 -d > extra_ca_cert_build_cluster.crt
+ca.crt
 ----
 
-. Locate the secret for you config bundle in the Console, and choose Actions -> Edit Secret and add the appropriate builder configuration:
+. Rename the `ca.crt` file to `extra_ca_cert_build_cluster.crt` by entering the following command:
++
+[source,terminal]
+----
+$ mv ca.crt extra_ca_cert_build_cluster.crt
+----
+
+. Locate the secret for your configuration bundle in the *Console*, and select *Actions* -> *Edit Secret* to add the appropriate builder configuration:
 +
 [source,yaml]
 ----
@@ -132,44 +152,48 @@ BUILD_MANAGER:
 - ephemeral
 - ALLOWED_WORKER_COUNT: 1
   ORCHESTRATOR_PREFIX: buildman/production/
+  JOB_REGISTRATION_TIMEOUT: 3600 <2>
   ORCHESTRATOR:
-    REDIS_HOST: <2>
+    REDIS_HOST: <3>
     REDIS_PASSWORD: ""
     REDIS_SSL: false
     REDIS_SKIP_KEYSPACE_EVENT_SETUP: false
   EXECUTORS:
   - EXECUTOR: kubernetesPodman
     NAME: openshift
-    BUILDER_NAMESPACE: <3>
+    BUILDER_NAMESPACE: <4>
     SETUP_TIME: 180
-    MINIMUM_RETRY_THRESHOLD: 1
-    BUILDER_CONTAINER_IMAGE: <4>
+    MINIMUM_RETRY_THRESHOLD: 0
+    BUILDER_CONTAINER_IMAGE: <5>
     # Kubernetes resource options
-    K8S_API_SERVER: <5>
-    K8S_API_TLS_CA: <6>
+    K8S_API_SERVER: <6>
+    K8S_API_TLS_CA: <7>
     VOLUME_SIZE: 8G
     KUBERNETES_DISTRIBUTION: openshift
-    CONTAINER_MEMORY_LIMITS: 300Mi
-    CONTAINER_CPU_LIMITS: 1G <7>
-    CONTAINER_MEMORY_REQUEST: 300Mi
-    CONTAINER_CPU_REQUEST: 1G
+    CONTAINER_MEMORY_LIMITS: 300m <8>
+    CONTAINER_CPU_LIMITS: 1G <9>
+    CONTAINER_MEMORY_REQUEST: 300m <10>
+    CONTAINER_CPU_REQUEST: 1G <11>
     NODE_SELECTOR_LABEL_KEY: ""
     NODE_SELECTOR_LABEL_VALUE: ""
     SERVICE_ACCOUNT_NAME:
-    SERVICE_ACCOUNT_TOKEN: <8>
-----
-+
-<1> The build route is obtained by running `oc get route -n` with the name of your OpenShift Operators namespace. A port must be provided at the end of the route, for example, and it should follow the following format: `[quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name]:443`.
-//<> If the `JOB_REGISTRATION_TIMEOUT` parameter is set too low, you might receive the following error: `failed to register job to build manager: rpc error: code = Unauthenticated desc = Invalid build token: Signature has expired`. It is suggested that this parameter be set to at least 240.
-<2> If your Redis host has a password or SSL certificates, you must update accordingly.
-<3> Set to match the name of your virtual builders namespace, for example, `virtual-builders`.
-<4> For early access, the `BUILDER_CONTAINER_IMAGE` is currently `quay.io/projectquay/quay-builder:3.7.0-rc.2`. Note that this might change during the early access window. In the event this happens, customers will be alerted.
-<5> Obtained by running `oc cluster-info`.
-<6> You must manually create and add your custom CA cert, for example, `K8S_API_TLS_CA: extra_ca_cert_build_cluster.crt`
-<7> For virtual builds, you must ensure that there are enough resources in your cluster.
-<8> Obtained when running `oc create sa`.
-+
-.Sample config
+    SERVICE_ACCOUNT_TOKEN: <12>
+----
++
+<1> The build route is obtained by running `oc get route -n` with the name of your OpenShift Operator's namespace. A port must be provided at the end of the route, and it should use the following format: `[quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name]:443`.
+<2> If the `JOB_REGISTRATION_TIMEOUT` parameter is set too low, you might receive the following error: `failed to register job to build manager: rpc error: code = Unauthenticated desc = Invalid build token: Signature has expired`. It is suggested that this parameter be set to at least 240.
+<3> If your Redis host has a password or SSL/TLS certificates, you must update accordingly.
+<4> Set to match the name of your virtual builders namespace, for example, `virtual-builders`.
+<5> For early access, the `BUILDER_CONTAINER_IMAGE` is currently `quay.io/projectquay/quay-builder:3.7.0-rc.2`. Note that this might change during the early access window. If this happens, customers are alerted.
+<6> The `K8S_API_SERVER` is obtained by running `oc cluster-info`.
+<7> You must manually create and add your custom CA cert, for example, `K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build_cluster.crt`.
+<8> Defaults to `5120Mi` if left unspecified.
+<9> For virtual builds, you must ensure that there are enough resources in your cluster. Defaults to `1000m` if left unspecified.
+<10> Defaults to `3968Mi` if left unspecified.
+<11> Defaults to `500m` if left unspecified.
+<12> Obtained when running `oc create sa`.
++
+.Sample configuration
 [source,yaml]
 ----
 FEATURE_USER_INITIALIZE: true
@@ -184,6 +208,7 @@ BUILD_MANAGER:
 - ephemeral
 - ALLOWED_WORKER_COUNT: 1
   ORCHESTRATOR_PREFIX: buildman/production/
+  JOB_REGISTRATION_TIMEOUT: 3600
   ORCHESTRATOR:
     REDIS_HOST: example-registry-quay-redis
     REDIS_PASSWORD: ""
@@ -194,16 +219,16 @@ BUILD_MANAGER:
     NAME: openshift
     BUILDER_NAMESPACE: virtual-builders
     SETUP_TIME: 180
-    MINIMUM_RETRY_THRESHOLD: 1
+    MINIMUM_RETRY_THRESHOLD: 0
     BUILDER_CONTAINER_IMAGE: quay.io/projectquay/quay-builder:3.7.0-rc.2
     # Kubernetes resource options
     K8S_API_SERVER: api.docs.quayteam.org:6443
-    K8S_API_TLS_CA: /conf/stack/extra_ca_cert_build_cluster.crt
+    K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build_cluster.crt
     VOLUME_SIZE: 8G
     KUBERNETES_DISTRIBUTION: openshift
-    CONTAINER_MEMORY_LIMITS: 1Gi
+    CONTAINER_MEMORY_LIMITS: 1G
     CONTAINER_CPU_LIMITS: 1080m
-    CONTAINER_MEMORY_REQUEST: 1Gi
+    CONTAINER_MEMORY_REQUEST: 1G
     CONTAINER_CPU_REQUEST: 580m
     NODE_SELECTOR_LABEL_KEY: ""
     NODE_SELECTOR_LABEL_VALUE: ""
@@ -211,24 +236,24 @@ BUILD_MANAGER:
     SERVICE_ACCOUNT_TOKEN: "eyJhbGciOiJSUzI1NiIsImtpZCI6IldfQUJkaDVmb3ltTHZ0dGZMYjhIWnYxZTQzN2dJVEJxcDJscldSdEUtYWsifQ"
 ----
 
-[[red-hat-quay-manual-ssl-for-builders]]
-=== Manually adding SSL certificates.
+[id="red-hat-quay-manual-ssl-for-builders"]
+=== Manually adding SSL/TLS certificates
 
-[IMPORTANT]
-====
-* Due to a known issue with the configuration tool, you must manually add your custom SSL certificates to properly run builders. Use the following procedure to manually add custom SSL certificates. For more information creating SSL certificates, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#using_ssl_to_protect_connections_to_red_hat_quay[Adding TLS certificates to the {productname} container].
-====
+Due to a known issue with the configuration tool, you must manually add your custom SSL/TLS certificates to properly run builders. Use the following procedure to manually add custom SSL/TLS certificates.
+For more information about creating SSL/TLS certificates, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#using_ssl_to_protect_connections_to_red_hat_quay[Adding TLS certificates to the {productname} container].
 
-==== Create and sign certs
-. Create a certificate authority and sign a certificate. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#create-a-ca-and-sign-a-certificate[Create a Certificate Authority and sign a certificate].
-+
-[NOTE]
-====
-* Add an `alt_name` for the URL of your Quay registry.
-* Add an `alt_name` for the `BUILDMAN_HOSTNAME` that is specified in your config.yaml.
+[id="create-sign-certificates"] +==== Creating and signing certificates + +Use the following procedure to create and sign an SSL/TLS certificate. + +.Procedure +* Create a certificate authority and sign a certificate. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#create-a-ca-and-sign-a-certificate[Create a Certificate Authority and sign a certificate]. ++ +.openssl.cnf [source,terminal] ---- [req] @@ -240,11 +265,11 @@ basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment subjectAltName = @alt_names [alt_names] -DNS.1 = example-registry-quay-quay-enterprise.apps.docs.quayteam.org -DNS.2 = example-registry-quay-builder-quay-enterprise.apps.docs.quayteam.org +DNS.1 = example-registry-quay-quay-enterprise.apps.docs.quayteam.org <1> +DNS.2 = example-registry-quay-builder-quay-enterprise.apps.docs.quayteam.org <2> ---- - -==== +<1> An `alt_name` for the URL of your {productname} registry must be included. +<2> An `alt_name` for the `BUILDMAN_HOSTNAME` + .Sample commands [source,terminal] @@ -256,19 +281,23 @@ $ openssl req -new -key ssl.key -out ssl.csr $ openssl x509 -req -in ssl.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out ssl.cert -days 356 -extensions v3_req -extfile openssl.cnf ---- +[id="setting-tls-unmanaged"] +==== Setting TLS to unmanaged -==== Set TLS to unmanaged +Use the following procedure to set `king:tls` to unmanaged. -In your Quay Registry yaml, set `kind: tls` to `managed: false`: +.Procedure +. In your {productname} Registry YAML, set `kind: tls` to `managed: false`: ++ [source,yaml] ---- - kind: tls managed: false ---- -In the events, you should see that the change is blocked until you set up the appropriate config: - +. On the *Events* page, the change is blocked until you set up the appropriate `config.yaml` file. For example: ++ [source,yaml] ---- - lastTransitionTime: '2022-03-28T12:56:49Z' @@ -281,22 +310,31 @@ In the events, you should see that the change is blocked until you set up the ap ---- +[id="creating-temporary-secrets"] +==== Creating temporary secrets -==== Create temporary secrets +Use the following procedure to create temporary secrets for the CA certificate. + +.Procedure -. Create a secret in your default namespace for the CA cert: +. Create a secret in your default namespace for the CA certificate: + ---- $ oc create secret generic -n quay-enterprise temp-crt --from-file extra_ca_cert_build_cluster.crt ---- -. Create a secret in your default namespace for the ssl.key and ssl.cert files: +. Create a secret in your default namespace for the `ssl.key` and `ssl.cert` files: + ---- $ oc create secret generic -n quay-enterprise quay-config-ssl --from-file ssl.cert --from-file ssl.key ---- -==== Copy secret data to config.yaml +[id="copying-secret-data-to-config"] +==== Copying secret data to the configuration YAML + +Use the following procedure to copy secret data to your `config.yaml` file. + +.Procedure . Locate the new secrets in the console UI at *Workloads* -> *Secrets*. @@ -338,18 +376,14 @@ data: type: Opaque ---- - - -. Locate the secret for your Quay Registry configuration bundle in the UI, or via the command line by running a command such as: +. 
+. Locate the secret for your {productname} registry configuration bundle in the UI, or through the command line by running a command like the following:
 +
 [source,terminal]
 ----
 $ oc get quayregistries.quay.redhat.com -o jsonpath="{.items[0].spec.configBundleSecret}{'\n'}"  -n quay-enterprise
 ----
 
-
-. Edit the YAML for your config bundle secret, adding the data from the two secrets you created:
+. In the {ocp} console, select the YAML tab for your configuration bundle secret, and add the data from the two secrets you created:
 +
 [source,yaml]
 ----
@@ -374,15 +408,16 @@ data:
 type: Opaque
 ----
 
+. Click *Save*.
 
-. Click *Save*. You should see the pods being re-started:
+. Enter the following command to see if your pods are restarting:
 +
 [source,terminal]
 ----
 $ oc get pods -n quay-enterprise
 ----
 +
-.Sample output
+.Example output
 [source,terminal]
 ----
 NAME                                                   READY   STATUS        RESTARTS   AGE
@@ -399,14 +434,14 @@ example-registry-quay-mirror-764d7b68d9-jqzwg          1/1     Terminating
 example-registry-quay-redis-7cc5f6c977-956g8           1/1     Running       0          5d21h
 ----
 
-. After your Quay registry has reconfigured, check that your Quay app pods are running:
+. After your {productname} registry has reconfigured, enter the following command to check if the {productname} app pods are running:
 +
 [source,terminal]
 ----
 $ oc get pods -n quay-enterprise
 ----
 +
-.Sample output
+.Example output
 [source,terminal]
 ----
 example-registry-quay-app-6786987b99-sz6kb             1/1     Running       0          7m45s
@@ -419,8 +454,7 @@ example-registry-quay-mirror-758fc68ff7-lbl82          1/1     Running
 example-registry-quay-redis-7cc5f6c977-956g8           1/1     Running       0          5d21h
 ----
 
-
-. In your browser, access the registry endpoint and validate that the certificate has been updated appropriately:
+. In your browser, access the registry endpoint and validate that the certificate has been updated appropriately. For example:
 +
 [source,terminal]
 ----
@@ -429,19 +463,27 @@ Organisation (O)	DOCS
 Organisational Unit (OU)	QUAY
 ----
 
-
-[[red-hat-quay-builders-ui]]
+[id="red-hat-quay-builders-ui"]
 === Using the UI to create a build trigger
 
-. Log in to your Quay repository.
+Use the following procedure to use the UI to create a build trigger.
+
+.Procedure
+
+. Log in to your {productname} repository.
 
 . Click *Create New Repository* and create a new repository, for example, `testrepo`.
 
-. On the *Repositories* page, click *Builds* tab on the left hand pane. Alternatively, use the corresponding URL directly, for example:
+. On the *Repositories* page, click the *Builds* tab on the navigation pane. Alternatively, use the corresponding URL directly:
 +
 ----
 https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/repository/quayadmin/testrepo?tab=builds
 ----
++
+[IMPORTANT]
+====
+In some cases, the builder might have issues resolving hostnames. This issue might be related to the `dnsPolicy` being set to `default` on the job object. Currently, there is no workaround for this issue. It will be resolved in a future version of {productname}.
+====
 
 . Click *Create Build Trigger* -> *Custom Git Repository Push*.
 
@@ -465,26 +507,39 @@ https://github.com/gabriel-rh/actions_test.git
 
 . Enter a commit SHA from the Git repository and click *Start Build*.
 
-. You can check the status of your build by clicking the commit in the *Build History* page, or by running `oc get pods -n virtual-builders`.
+. You can check the status of your build by clicking the commit in the *Build History* page, or by running `oc get pods -n virtual-builders`. For example:
 +
 ----
-  $ oc get pods -n virtual-builders
+$ oc get pods -n virtual-builders
+----
++
+.Example output
+----
 NAME                                               READY   STATUS    RESTARTS   AGE
 f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2   1/1     Running   0          7s
 ----
 +
+[source,terminal]
 ----
 $ oc get pods -n virtual-builders
+----
++
+.Example output
+----
 NAME                                               READY   STATUS        RESTARTS   AGE
 f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2   1/1     Terminating   0          9s
 ----
 +
 ----
 $ oc get pods -n virtual-builders
+----
++
+.Example output
+----
 No resources found in virtual-builders namespace.
 ----
 
-. When the build is finished, you can check the status of the tag under *Tags* on the left hand pane.
+. When the build is finished, you can check the status of the tag under *Tags* on the navigation pane.
 +
 [NOTE]
 ====
@@ -492,10 +547,12 @@ With early access, full build logs and timestamps of builds are currently unavai
 ====
 
 
-[[red-hat-quay-s3-bucket-modify]]
+[id="red-hat-quay-s3-bucket-modify"]
 === Modifying your AWS S3 storage bucket
 
-If you are using AWS S3 storage, you must modify your storage bucket in the AWS console, prior to running builders.
+If you are using AWS S3 storage, you must change your storage bucket in the AWS console prior to running builders.
+
+.Procedure
 
 . Log in to your AWS console at link:https://s3.console.aws.amazon.com[s3.console.aws.amazon.com].
diff --git a/modules/clair-advanced-configuration-overview.adoc b/modules/clair-advanced-configuration-overview.adoc
new file mode 100644
index 000000000..e036fd434
--- /dev/null
+++ b/modules/clair-advanced-configuration-overview.adoc
@@ -0,0 +1,5 @@
+:_content-type: CONCEPT
+[id="clair-advanced-configuration-overview"]
+= Advanced Clair configuration
+
+Use the procedures in the following sections to configure advanced Clair settings.
\ No newline at end of file
diff --git a/modules/clair-airgap.adoc b/modules/clair-airgap.adoc
new file mode 100644
index 000000000..553b79a7e
--- /dev/null
+++ b/modules/clair-airgap.adoc
@@ -0,0 +1,5 @@
+:_content-type: CONCEPT
+[id="clair-airgap"]
+== Air gapped Clair
+
+For flexibility, Clair supports running updaters in a separate environment and importing the results. This is aimed at supporting installations where the Clair cluster is prevented from communicating directly with the internet.
diff --git a/modules/clair-analyses.adoc b/modules/clair-analyses.adoc
index 84fdf43b0..fd368ddf9 100644
--- a/modules/clair-analyses.adoc
+++ b/modules/clair-analyses.adoc
@@ -11,17 +11,17 @@ Once a `Manifest` is indexed, the `IndexReport` is persisted for later retrieval
 
 - **Matching**: Matching is taking an `IndexReport` and correlating vulnerabilities affecting the `Manifest` the report represents.
 +
-Clair continuously ingests new security data and a request to the matcher will always provide users with the most to date vulnerability analysis of an `IndexReport`.
+Clair continuously ingests new security data and a request to the matcher will always provide users with the most up to date vulnerability analysis of an `IndexReport`.
 
 - **Notifications**: Clair implements a notification service. When new vulnerabilities are discovered, the notifier service will determine if these vulnerabilities affect any indexed `Manifests`. The notifier will then take action according to its configuration.
 
 == Notifications for vulnerabilities found by Clair
 
-{productname} 3.4 triggers different notifications for various repository events. These notifications vary based on enabled features.
+Since {productname} 3.4, different notifications are triggered for various repository events. These notifications vary based on enabled features.
 
 [NOTE]
 ====
-This include the event type `Package Vulnerability Found`
+This includes the event type `Package Vulnerability Found`
 ====
 
 `Additional Filter` can be applied for `Security Level`, and there are various notification methods. Custom notification titles are also optional.
diff --git a/modules/clair-authentication.adoc b/modules/clair-authentication.adoc
new file mode 100644
index 000000000..a2f3a6276
--- /dev/null
+++ b/modules/clair-authentication.adoc
@@ -0,0 +1,31 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-authentication"]
+= Clair authentication
+
+In its current iteration, Clair v4 (Clair) handles authentication internally.
+
+[NOTE]
+====
+Previous versions of Clair used JWT Proxy to gate authentication.
+====
+
+Authentication is configured by specifying configuration objects underneath the `auth` key of the configuration. Multiple authentication configurations might be present, but they are used preferentially in the following order:
+
+. PSK. With this authentication configuration, Clair implements JWT-based authentication using a pre-shared key.
+
+. Configuration. For example:
++
+[source,yaml]
+----
+auth:
+  psk:
+    key: >-
+      MDQ4ODBlNDAtNDc0ZC00MWUxLThhMzAtOTk0MzEwMGQwYTMxCg==
+    iss: 'issuer'
+----
++
+In this configuration, the `auth` field requires two parameters: `iss`, which is the issuer to validate all incoming requests, and `key`, which is a base64 encoded symmetric key for validating the requests.
\ No newline at end of file
diff --git a/modules/clair-clairctl-standalone.adoc b/modules/clair-clairctl-standalone.adoc
new file mode 100644
index 000000000..501e5057f
--- /dev/null
+++ b/modules/clair-clairctl-standalone.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-disconnected-standalone-configuration"]
+= Setting up a self-managed deployment of Clair for a disconnected {ocp} cluster
+
+Use the following procedures to set up a self-managed deployment of Clair for a disconnected {ocp} cluster.
+
+[id="clair-clairctl-standalone"]
+== Installing the clairctl command line utility tool for a self-managed Clair deployment on {ocp}
+
+Use the following procedure to install the `clairctl` CLI tool for self-managed Clair deployments on {ocp}.
+
+.Procedure
+
+. Install the `clairctl` program for a self-managed Clair deployment by using the `podman cp` command, for example:
++
+[source,terminal]
+----
+$ sudo podman cp clairv4:/usr/bin/clairctl ./clairctl
+----
+
+. Set the permissions of the `clairctl` file so that it can be executed and run by the user, for example:
++
+[source,terminal]
+----
+$ chmod u+x ./clairctl
+----
\ No newline at end of file
diff --git a/modules/clair-clairctl.adoc b/modules/clair-clairctl.adoc
index 3c2770417..a76527c47 100644
--- a/modules/clair-clairctl.adoc
+++ b/modules/clair-clairctl.adoc
@@ -1,17 +1,35 @@
-[[clair-clairctl]]
-= Obtaining clairctl
+// Module included in the following assemblies:
+//
+// clair/master.adoc
 
-To obtain the `clairctl` program from a Clair deployment in an OpenShift cluster, use the `oc cp` command, for example:
+:_content-type: PROCEDURE
+[id="clair-disconnected-ocp-configuration"]
+= Setting up Clair in a disconnected {ocp} cluster
 
-----
-$ oc -n quay-enterprise cp example-registry-clair-app-64dd48f866-6ptgw:/usr/bin/clairctl ./clairctl
-$ chmod u+x ./clairctl
-----
+Use the following procedures to set up an {ocp} provisioned Clair pod in a disconnected {ocp} cluster.
+
+[id="clair-clairctl-ocp"]
+== Installing the clairctl command line utility tool for {ocp} deployments
 
-For a standalone Clair deployment, use the `podman cp` command, for example:
+Use the following procedure to install the `clairctl` CLI tool for {ocp} deployments.
 
+.Procedure
+
+. Install the `clairctl` program for a Clair deployment in an {ocp} cluster by entering the following command:
++
+[source,terminal]
 ----
-$ sudo podman cp clairv4:/usr/bin/clairctl ./clairctl
-$ chmod u+x ./clairctl
+$ oc -n quay-enterprise exec example-registry-clair-app-64dd48f866-6ptgw -- cat /usr/bin/clairctl > clairctl
 ----
++
+[NOTE]
+====
+Unofficially, the `clairctl` tool can be downloaded
+====
+. Set the permissions of the `clairctl` file so that it can be executed and run by the user, for example:
++
+[source,terminal]
+----
+$ chmod u+x ./clairctl
+----
\ No newline at end of file
diff --git a/modules/clair-concepts.adoc b/modules/clair-concepts.adoc
new file mode 100644
index 000000000..835535f77
--- /dev/null
+++ b/modules/clair-concepts.adoc
@@ -0,0 +1,153 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="clair-concepts"]
+= Clair concepts
+
+The following sections provide a conceptual overview of how Clair works.
+
+[id="clair-practice"]
+== Clair in practice
+
+A Clair analysis is broken down into three distinct parts: indexing, matching, and notification.
+
+[id="clair-indexing-concept"]
+=== Indexing
+
+Clair's indexer service is responsible for indexing a manifest. In Clair, manifests are representations of a container image. The indexer service is the component that Clair uses to understand the contents of layers. Clair leverages the fact that Open Container Initiative (OCI) manifests and layers are content addressed to reduce duplicate work.
+
+Indexing involves taking a manifest representing a container image and computing its constituent parts. The indexer tries to discover what packages exist in the image, what distribution the image is derived from, and what package repositories are used within the image. When this information is computed, it is persisted into an `IndexReport`.
+
+The `IndexReport` is stored in Clair's database. It can be fed to a `matcher` node to compute the vulnerability report.
+
+[id="content-addressability"]
+==== Content addressability
+
+Clair treats all manifests and layers as _content addressable_. In the context of Clair, content addressable means that when a specific manifest is indexed, it is not indexed again unless it is required; this is the same for individual layers.
+
+For example, consider how many images in a registry might use `ubuntu:artful` as a base layer. If the developers prefer basing their images on Ubuntu, it could be a large majority of images. Treating the layers and manifests as content addressable means that Clair only fetches and analyzes the base layer one time.
+
+In some cases, Clair should re-index a manifest. For example, when an internal component such as a package scanner is updated, Clair performs the analysis with the new package scanner. Clair has enough information to determine that a component has changed and that the `IndexReport` might be different the second time, and as a result it re-indexes the manifest.
+
+A client can track Clair's `index_state` endpoint to understand when an internal component has changed, and can subsequently issue re-indexes. See the Clair API guide to learn how to view Clair's API specification.
+
+[id="clair-matching-concept"]
+=== Matching
+
+With Clair, a matcher node is responsible for matching vulnerabilities to a provided `IndexReport`.
+
+Matchers are responsible for keeping the database of vulnerabilities up to date. Matchers will typically run a set of updaters, which periodically probe their data sources for new content. New vulnerabilities are stored in the database when they are discovered.
+
+The matcher API is designed to be used often. It is designed to always provide the most recent `VulnerabilityReport` when queried. The `VulnerabilityReport` summarizes both a manifest's content and any vulnerabilities affecting the content.
+
+// See. . . to learn more about how to view the Clair API specification and to work with the matcher API.
+
+[id="remote-matching"]
+==== Remote matching
+
+A remote matcher acts similarly to a matcher; however, remote matchers use API calls to fetch vulnerability data for a provided `IndexReport`. Remote matchers are useful when it is impossible to persist data from a given source into the database.
+
+The CRDA remote matcher is responsible for fetching vulnerabilities from Red Hat Code Ready Dependency Analytics (CRDA). By default, this matcher serves 100 requests per minute. The rate limiting can be lifted by requesting a dedicated API key, which is done by submitting link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form].
+
+To enable CRDA remote matching, see "Enabling CRDA for Clair".
+
+[id="clair-notifications-concept"]
+=== Notifications
+
+Clair uses a notifier service that keeps track of new security database updates and informs users if new or removed vulnerabilities affect an indexed manifest.
+
+When the notifier becomes aware of new vulnerabilities affecting a previously indexed manifest, it uses the configured methods in your `config.yaml` file to issue notifications about the new changes. Returned notifications express the most severe vulnerability discovered because of the change. This avoids creating excessive notifications for the same security database update.
+
+When a client receives a notification, it can issue a new request against the matcher to receive an up to date vulnerability report.
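+
+For illustration, the following is a minimal sketch of that follow-up request; the Clair host name and the manifest digest are placeholders, and the `vulnerability_report` path is assumed from the Clair v4 matcher API:
+
+[source,terminal]
+----
+# Request a refreshed vulnerability report for the manifest named in the notification
+$ curl http://<clair-host>/matcher/api/v1/vulnerability_report/<manifest_digest>
+----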
+
+The notification schema is the JSON marshalled form of the following types:
+
+[source,json]
+----
+// Reason indicates the catalyst for a notification
+type Reason string
+const (
+	Added   Reason = "added"
+	Removed Reason = "removed"
+	Changed Reason = "changed"
+)
+type Notification struct {
+	ID            uuid.UUID        `json:"id"`
+	Manifest      claircore.Digest `json:"manifest"`
+	Reason        Reason           `json:"reason"`
+	Vulnerability VulnSummary      `json:"vulnerability"`
+}
+type VulnSummary struct {
+	Name           string                  `json:"name"`
+	Description    string                  `json:"description"`
+	Package        *claircore.Package      `json:"package,omitempty"`
+	Distribution   *claircore.Distribution `json:"distribution,omitempty"`
+	Repo           *claircore.Repository   `json:"repo,omitempty"`
+	Severity       string                  `json:"severity"`
+	FixedInVersion string                  `json:"fixed_in_version"`
+	Links          string                  `json:"links"`
+}
+----
+
+You can subscribe to notifications through the following mechanisms:
+
+* Webhook delivery
+* AMQP delivery
+* STOMP delivery
+
+Configuring the notifier is done through the Clair YAML configuration file.
+
+[id="webhook-delivery"]
+==== Webhook delivery
+
+When you configure the notifier for webhook delivery, you provide the service with the following pieces of information:
+
+* A target URL where the webhook will fire.
+* The callback URL where the notifier might be reached, including its API path. For example, `http://clair-notifier/notifier/api/v1/notifications`.
+
+When the notifier determines that an updated security database has changed the affected status of an indexed manifest, it delivers the following JSON body to the configured target:
+
+[source,json]
+----
+{
+  "notification_id": {uuid_string},
+  "callback": {url_to_notifications}
+}
+----
+
+On receipt, the receiver can browse to the URL provided in the callback field.
+
+[id="amqp-delivery"]
+==== AMQP delivery
+
+The Clair notifier also supports delivering notifications to an AMQP broker. With AMQP delivery, you can control whether a callback is delivered to the broker or whether notifications are directly delivered to the queue. This allows the developer of the AMQP consumer to determine the logic of notification processing.
+
+[NOTE]
+====
+AMQP delivery only supports the AMQP 0.x protocol (for example, RabbitMQ). If you need to publish notifications to an AMQP 1.x message queue (for example, ActiveMQ), you can use STOMP delivery.
+====
+
+[id="amqp-direct-delivery"]
+===== AMQP direct delivery
+
+If the Clair notifier's configuration specifies `direct: true` for AMQP delivery, notifications are delivered directly to the configured exchange.
+
+When `direct` is set, the `rollup` property might be set to instruct the notifier to send a maximum number of notifications in a single AMQP message. This provides balance between the size of the message and the number of messages delivered to the queue.
+
+[id="notifier-testing-development"]
+==== Notifier testing and development mode
+
+The notifier has a testing and development mode that can be enabled with the `NOTIFIER_TEST_MODE` parameter. This parameter can be set to any value.
+
+When the `NOTIFIER_TEST_MODE` parameter is set, the notifier begins sending fake notifications to the configured delivery mechanism every `poll_interval` interval. This provides an easy way to implement and test new or existing deliverers.
+
+The notifier runs in `NOTIFIER_TEST_MODE` until the environment variable is cleared and the service is restarted.
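+
+As an illustration only, the following sketch shows one way to set the variable on a standalone Clair container; the `CLAIR_CONF` and `CLAIR_MODE` settings and the volume path are assumptions based on the standalone deployment examples elsewhere in this guide and might differ in your deployment:
+
+[subs="verbatim,attributes"]
+----
+# Start Clair with test mode enabled; fake notifications fire every poll_interval
+$ podman run -it --rm -e NOTIFIER_TEST_MODE=true \
+   -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=combo \
+   -v /etc/clairv4/config:/clair:Z \
+   {productrepo}/{clairimage}:{productminv}
+----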
+ +[id="deleting-notifications"] +==== Deleting notifications + +To delete the notification, you can use the `DELETE` API call. Deleting a notification ID manually cleans up resources in the notifier. If you do not use the `DELETE` API call, the notifier waits a predetermined length of time before clearing delivered notifications from its database. + +// For more information on the `DELETE` API call, see. . . \ No newline at end of file diff --git a/modules/clair-crda-configuration.adoc b/modules/clair-crda-configuration.adoc new file mode 100644 index 000000000..975ca1997 --- /dev/null +++ b/modules/clair-crda-configuration.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-crda-configuration"] += Enabling Clair CRDA + +Java scanning depends on a public, Red Hat provided API service called Code Ready Dependency Analytics (CRDA). CRDA is only available with internet access and is not enabled by default. + +Use the following procedure to integrate the CRDA service with a custom API key and enable CRDA for Java and Python scanning. + +.Prerequisites + +* {productname} 3.7 or greater + +.Procedure + +. Submit link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form] to obtain the Quay-specific CRDA remote matcher. + +. Set the CRDA configuration in your `clair-config.yaml` file: ++ +[source,terminal] +---- +matchers: + config: + crda: + url: https://gw.api.openshift.io/api/v2/ + key: <1> + source: <2> +---- ++ +<1> Insert the Quay-specific CRDA remote matcher from link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form] here. +<2> The hostname of your Quay server. \ No newline at end of file diff --git a/modules/clair-cve.adoc b/modules/clair-cve.adoc index d27df1536..5c9142bae 100644 --- a/modules/clair-cve.adoc +++ b/modules/clair-cve.adoc @@ -1,10 +1,14 @@ -[[clair-cve]] +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-cve"] = CVE ratings from the National Vulnerability Database -With Clair v4.2, enrichment data is now viewable in the Quay UI. -Additionally, Clair v4.2 adds CVSS scores from the National Vulnerability Database for detected vulnerabilities. +As of Clair v4.2, Common Vulnerability Scoring System (CVSS) enrichment data is now viewable in the {productname} UI. Additionally, Clair v4.2 adds CVSS scores from the National Vulnerability Database for detected vulnerabilities. -With this change, if the vulnerability has a CVSS score that is within 2 levels of the distro's score, the Quay UI present's the distro's score by default. For example: +With this change, if the vulnerability has a CVSS score that is within 2 levels of the distribution score, the {productname} UI present's the distribution's score by default. For example: image:clair-4-2-enrichment-data.png[Clair v4.2 data display] diff --git a/modules/clair-disconnected.adoc b/modules/clair-disconnected.adoc index ee4ac0ec9..4241fe660 100644 --- a/modules/clair-disconnected.adoc +++ b/modules/clair-disconnected.adoc @@ -1,31 +1,18 @@ -[[clair-disconnected]] -= Configuring Clair for Disconnected Environments +// Module included in the following assemblies: +// +// clair/master.adoc -Clair utilizes a set of components called Updaters to handle the fetching and parsing of data from various vulnerability databases. 
+:_content-type: CONCEPT
+[id="clair-disconnected-environments"]
+= Clair in disconnected environments
 
-The steps are as follows.
+Clair uses a set of components called _updaters_ to handle the fetching and parsing of data from various vulnerability databases. Updaters are set up by default to pull vulnerability data directly from the internet and work for immediate use. However, some users might require {productname} to run in a disconnected environment, or an environment without direct access to the internet. Clair supports disconnected environments by working with different types of update workflows that take network isolation into consideration. This works by using the `clairctl` command line interface tool, which obtains updater data from the internet by using an open host, securely transferring the data to an isolated host, and then importing the updater data on the isolated host into Clair.
 
-. First ensure that your Clair configuration has disabled automated Updaters from running.
-+
-.config.yaml
-[source,yaml]
-----
-matcher:
-  disable_updaters: true
+Use this guide to deploy Clair in a disconnected environment.
 
-----
-
-. Export out the latest Updater data to a local archive. This requires the `clairctl` tool which can be run directly as a binary, or via the Clair container image. Assuming your Clair configuration is in `/etc/clairv4/config/config.yaml`, to run via the container image:
-+
-[subs="verbatim,attributes"]
-```
-$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml export-updaters /updaters/updaters.gz
-```
-+
-Note that you need to explicitly reference the Clair configuration. This will create the Updater archive in `/etc/clairv4/updaters/updaters.gz`. If you want to ensure the archive was created without any errors from the source databases, you can supply the `--strict` flag to `clairctl`. The archive file should be copied over to a volume that is accessible from the disconnected host running Clair. From the disconnected host, use the same procedure now to import the archive into Clair.
-+
-[subs="verbatim,attributes"]
-```
-$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml import-updaters /updaters/updaters.gz
-```
+[NOTE]
+====
+Currently, Clair enrichment data is CVSS data. Enrichment data is currently unsupported in disconnected environments.
+====
+For more information about Clair updaters, see "Clair updaters".
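+
+For reference, the following is a minimal sketch of the `config.yaml` settings that the later procedures in this guide assume for a disconnected deployment. The `disable_updaters` setting under `matcher` is taken from this guide; the placement of `airgap` under `indexer` follows the upstream Clair configuration reference and might differ in your file:
+
+[source,yaml]
+----
+# Prevent the indexer from reaching out to the internet
+indexer:
+  airgap: true
+# Stop the matcher's updaters from running automatically
+matcher:
+  disable_updaters: true
+----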
\ No newline at end of file diff --git a/modules/clair-export-bundle-standalone.adoc b/modules/clair-export-bundle-standalone.adoc new file mode 100644 index 000000000..bacdc3636 --- /dev/null +++ b/modules/clair-export-bundle-standalone.adoc @@ -0,0 +1,24 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-export-bundle-standalone"] += Exporting the updaters bundle from a connected Clair instance + +Use the following procedure to export the updaters bundle from a Clair instance that has access to the internet. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have deployed Clair. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. + +.Procedure + +* From a Clair instance that has access to the internet, use the `clairctl` CLI tool with your configuration file to export the updaters bundle. For example: ++ +[source,terminal] +---- +$ ./clairctl --config ./config.yaml export-updaters updates.gz +---- \ No newline at end of file diff --git a/modules/clair-export-bundle.adoc b/modules/clair-export-bundle.adoc index 23946906d..0c11ce7c2 100644 --- a/modules/clair-export-bundle.adoc +++ b/modules/clair-export-bundle.adoc @@ -1,9 +1,24 @@ -[[clair-export-bundle]] -= Exporting the updaters bundle +// Module included in the following assemblies: +// +// clair/master.adoc -From a Clair instance that has access to the internet, use `clairctl` with the appropriate configuration file to export the updaters bundle: +:_content-type: PROCEDURE +[id="clair-export-bundle"] += Exporting the updaters bundle from a connected Clair instance +Use the following procedure to export the updaters bundle from a Clair instance that has access to the internet. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. + +.Procedure + +* From a Clair instance that has access to the internet, use the `clairctl` CLI tool with your configuration file to export the updaters bundle. For example: ++ +[source,terminal] ---- $ ./clairctl --config ./config.yaml export-updaters updates.gz ----- - +---- \ No newline at end of file diff --git a/modules/clair-intro.adoc b/modules/clair-intro.adoc index 17a75c748..3338ac0fb 100644 --- a/modules/clair-intro.adoc +++ b/modules/clair-intro.adoc @@ -1,7 +1,7 @@ [[clair-intro]] = {productname} vulnerability scanning using Clair -Clair is equipped with three types of scanners, a matcher, and an updater: +Clair is equipped with three types of scanners, and a matcher and an updater: - **Distribution Scanner**: This scanner discovers `Distribution` information, which is typically the base operator system the layer demonstrates features of. diff --git a/modules/clair-intro2.adoc b/modules/clair-intro2.adoc deleted file mode 100644 index 7b0bd1290..000000000 --- a/modules/clair-intro2.adoc +++ /dev/null @@ -1,33 +0,0 @@ -[[clair-intro2]] -= Clair Security Scanning - -Clair is a set of micro services that can be used with {productname} -to perform vulnerability scanning of container images associated with a set of -Linux operating systems. 
-appropriate to run in a highly scalable configuration, where
-components can be scaled separately as appropriate for enterprise environments.
-
-Clair uses the following vulnerability databases to scan for issues in your images:
-
-* Alpine SecDB database
-* AWS UpdateInfo
-* Debian Oval database
-* Oracle Oval database
-* RHEL Oval database
-* SUSE Oval database
-* Ubuntu Oval database
-* Pyup.io (python) database
-
-For information on how Clair does security mapping with the different databases, see
-link:https://quay.github.io/claircore/concepts/severity_mapping.html[ClairCore Severity Mapping].
-
-[NOTE]
-====
-
-ifeval::["{productname}" == "Red Hat Quay"]
-With the release of Red Hat Quay 3.4, the new Clair V4 (image {productrepo}/{clairimage} fully replaces the prior Clair V2 (image quay.io/redhat/clair-jwt). See below for how to run V2 in read-only mode while V4 is updating.
-endif::[]
-ifeval::["{productname}" == "Project Quay"]
-With the release of Clair V4 (image clair), the previously used Clair V2 (image clair-jwt) is no longer used. See below for how to run V2 in read-only mode while V4 is updating.
-endif::[]
-==== 
diff --git a/modules/clair-openshift-airgap-database-standalone.adoc b/modules/clair-openshift-airgap-database-standalone.adoc
new file mode 100644
index 000000000..cfd03e0e0
--- /dev/null
+++ b/modules/clair-openshift-airgap-database-standalone.adoc
@@ -0,0 +1,63 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="clair-openshift-airgap-database-standalone"]
+= Configuring access to the Clair database in the disconnected {ocp} cluster
+
+Use the following procedure to configure access to the Clair database in your disconnected {ocp} cluster.
+
+.Prerequisites
+
+* You have installed the `clairctl` command line utility tool.
+* You have deployed Clair.
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file.
+* You have exported the updaters bundle from a Clair instance that has access to the internet.
+
+.Procedure
+
+. Determine your Clair database service by using the `oc` CLI tool, for example:
++
+[source,terminal]
+----
+$ oc get svc -n quay-enterprise
+----
++
+.Example output
++
+[source,terminal]
+----
+NAME                              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)           AGE
+example-registry-clair-app        ClusterIP   172.30.224.93   <none>        80/TCP,8089/TCP   4d21h
+example-registry-clair-postgres   ClusterIP   172.30.246.88   <none>        5432/TCP          4d21h
+...
+----
+
+. Forward the Clair database port so that it is accessible from the local machine. For example:
++
+[source,terminal]
+----
+$ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432
+----
+
+. Update your Clair `config.yaml` file, for example:
++
+[source,yaml]
+----
+indexer:
+  connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1>
+  scanlock_retry: 10
+  layer_scan_concurrency: 5
+  migrations: true
+  scanner:
+    repo:
+      rhel-repository-scanner: <2>
+        repo2cpe_mapping_file: /data/cpe-map.json
+    package:
+      rhel_containerscanner: <3>
+        name2repos_mapping_file: /data/repo-map.json
+----
+<1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`.
+<2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information".
+<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information". \ No newline at end of file diff --git a/modules/clair-openshift-airgap-database.adoc b/modules/clair-openshift-airgap-database.adoc index 456d27883..c0ca82b1e 100644 --- a/modules/clair-openshift-airgap-database.adoc +++ b/modules/clair-openshift-airgap-database.adoc @@ -1,34 +1,63 @@ -[[clair-openshift-airgap-database]] -= Configuring access to the Clair database in the air-gapped OpenShift cluster +// Module included in the following assemblies: +// +// clair/master.adoc -* Use `kubectl` to determine the Clair database service: +:_content-type: PROCEDURE +[id="clair-openshift-airgap-database"] += Configuring access to the Clair database in the disconnected {ocp} cluster + +Use the following procedure to configure access to the Clair database in your disconnected {ocp} cluster. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. +* You have exported the updaters bundle from a Clair instance that has access to the internet. + +.Procedure + +. Determine your Clair database service by using the `oc` CLI tool, for example: + +[source,terminal] ---- -$ kubectl get svc -n quay-enterprise - +$ oc get svc -n quay-enterprise +---- ++ +.Example output ++ +[source,terminal] +---- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE example-registry-clair-app ClusterIP 172.30.224.93 80/TCP,8089/TCP 4d21h example-registry-clair-postgres ClusterIP 172.30.246.88 5432/TCP 4d21h ... ---- -* Forward the Clair database port so that it is accessible from the local machine, for example: +. Forward the Clair database port so that it is accessible from the local machine. For example: + +[source,terminal] ---- -$ kubectl port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432 +$ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432 ---- -* Update the Clair configuration file, replacing the value of the `host` in the multiple `connstring` fields with `localhost`, for example: +. Update your Clair `config.yaml` file, for example: + -.clair-config.yaml [source,yaml] ---- - ... - connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable - ... +indexer: + connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1> + scanlock_retry: 10 + layer_scan_concurrency: 5 + migrations: true + scanner: + repo: + rhel-repository-scanner: <2> + repo2cpe_mapping_file: /data/cpe-map.json + package: + rhel_containerscanner: <3> + name2repos_mapping_file: /data/repo-map.json ---- - -[NOTE] -==== -As an alternative to using `kubectl port-forward`, you can use `kubefwd` instead. With this method, there is no need to modify the `connstring` field in the Clair configuration file to use `localhost`. -==== +<1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`. +<2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information". +<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information".
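+Optionally, you can verify that the forwarded database port is reachable before updating your `config.yaml` file. The following check is an illustrative sketch only; it assumes that the `psql` client is installed on the local machine and that the default `postgres` credentials shown in the example output above are in use:
+
+[source,terminal]
+----
+$ psql -h localhost -p 5432 -U postgres -d postgres -c 'SELECT 1;'
+----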
diff --git a/modules/clair-openshift-airgap-import-bundle-standalone.adoc b/modules/clair-openshift-airgap-import-bundle-standalone.adoc new file mode 100644 index 000000000..577e85622 --- /dev/null +++ b/modules/clair-openshift-airgap-import-bundle-standalone.adoc @@ -0,0 +1,26 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-openshift-airgap-import-bundle-standalone"] += Importing the updaters bundle into the disconnected {ocp} cluster + +Use the following procedure to import the updaters bundle into your disconnected {ocp} cluster. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have deployed Clair. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. +* You have exported the updaters bundle from a Clair instance that has access to the internet. +* You have transferred the updaters bundle into your disconnected environment. + +.Procedure + +* Use the `clairctl` CLI tool to import the updaters bundle into the Clair database that is deployed by {ocp}: ++ +[source,terminal] +---- +$ ./clairctl --config ./clair-config.yaml import-updaters updates.gz +---- diff --git a/modules/clair-openshift-airgap-import-bundle.adoc b/modules/clair-openshift-airgap-import-bundle.adoc index 8a4ab5807..c4f28b537 100644 --- a/modules/clair-openshift-airgap-import-bundle.adoc +++ b/modules/clair-openshift-airgap-import-bundle.adoc @@ -1,8 +1,27 @@ -[[clair-openshift-airgap-import-bundle]] -= Importing the updaters bundle into the air-gapped environment -After transferring the updaters bundle to the air-gapped environment, use `clairctl` to import the bundle into the Clair database deployed by the OpenShift Operator: +// Module included in the following assemblies: +// +// clair/master.adoc +:_content-type: PROCEDURE +[id="clair-openshift-airgap-import-bundle"] += Importing the updaters bundle into the disconnected {ocp} cluster + +Use the following procedure to import the updaters bundle into your disconnected {ocp} cluster. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. +* You have exported the updaters bundle from a Clair instance that has access to the internet. +* You have transferred the updaters bundle into your disconnected environment. + +.Procedure + +* Use the `clairctl` CLI tool to import the updaters bundle into the Clair database that is deployed by {ocp}. 
For example: ++ +[source,terminal] ---- $ ./clairctl --config ./clair-config.yaml import-updaters updates.gz ---- diff --git a/modules/clair-openshift-config.adoc b/modules/clair-openshift-config.adoc index 9ed25f036..4689bc893 100644 --- a/modules/clair-openshift-config.adoc +++ b/modules/clair-openshift-config.adoc @@ -1,43 +1,35 @@ -[[clair-openshift-config]] -= Clair on OpenShift config +// Module included in the following assemblies: +// +// clair/master.adoc -To retrieve the configuration file for a Clair instance deployed using the OpenShift Operator, retrieve and decode the config secret using the appropriate namespace, and save it to file, for example: +:_content-type: PROCEDURE +[id="clair-openshift-config"] += Retrieving and decoding the Clair configuration secret for Clair deployments on {ocp} +Use the following procedure to retrieve and decode the configuration secret for an {ocp} provisioned Clair instance on {ocp}. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. + +.Procedure + +. Enter the following command to retrieve and decode the configuration secret, and then save it to a Clair configuration YAML: ++ +[source,terminal] ---- -$ kubectl get secret -n quay-enterprise example-registry-clair-config-secret -o "jsonpath={$.data['config\.yaml']}" | base64 -d > clair-config.yaml +$ oc get secret -n quay-enterprise example-registry-clair-config-secret -o "jsonpath={$.data['config\.yaml']}" | base64 -d > clair-config.yaml ---- -An excerpt from a Clair configuration file is shown below: - -.clair-config.yaml +. Update the `clair-config.yaml` file so that the `disable_updaters` and `airgap` parameters are set to `true`, for example: ++ [source,yaml] ---- -http_listen_addr: :8080 -introspection_addr: "" -log_level: info +--- indexer: - connstring: host=example-registry-clair-postgres port=5432 dbname=postgres user=postgres password=postgres sslmode=disable - scanlock_retry: 10 - layer_scan_concurrency: 5 - migrations: true - scanner: - package: {} - dist: {} - repo: {} - airgap: false + airgap: true +--- matcher: - connstring: host=example-registry-clair-postgres port=5432 dbname=postgres user=postgres password=postgres sslmode=disable - max_conn_pool: 100 - indexer_addr: "" - migrations: true - period: null - disable_updaters: false -notifier: - connstring: host=example-registry-clair-postgres port=5432 dbname=postgres user=postgres password=postgres sslmode=disable - migrations: true - indexer_addr: "" - matcher_addr: "" - poll_interval: 5m - delivery_interval: 1m - ... ----- + disable_updaters: true +--- +---- \ No newline at end of file diff --git a/modules/clair-openshift-manual.adoc b/modules/clair-openshift-manual.adoc index 11102cf02..4610a5e48 100644 --- a/modules/clair-openshift-manual.adoc +++ b/modules/clair-openshift-manual.adoc @@ -1,19 +1,28 @@ -[[clair-openshift-manual]] -= Manually Deploying Clair +// Module included in the following assemblies: +// +// clair/master.adoc -To configure Clair V4 on an existing {productname} OpenShift deployment running Clair V2, first ensure {productname} has been upgraded to at least version 3.4.0. Then use the following steps to manually set up Clair V4 alongside Clair V2. +:_content-type: PROCEDURE +[id="manually-deploy-clair-ocp"] += Setting up Clair on {productname} Operator deployment -. Set your current project to the name of the project in which {productname} is running. -For example: +Use the following procedure to configure Clair on a {productname} {ocp} deployment. 
+ +.Prerequisites + +* Your {productname} Operator has been upgraded to 3.4.0 or greater. + +.Procedure + +. Enter the following command to set your current project to the name of the project that is running {productname}: + -``` +[source,terminal] +---- $ oc project quay-enterprise -``` +---- -. Create a Postgres deployment file for Clair v4 (for example, `clairv4-postgres.yaml`) -as follows. +. Create a Postgres deployment file for Clair, for example, `clairv4-postgres.yaml`: + -.clairv4-postgres.yaml [source,yaml] ---- --- @@ -70,6 +79,7 @@ spec: requests: storage: "5Gi" volumeName: "clairv4-postgres" + storageClassName: <1> --- apiVersion: v1 kind: Service @@ -87,20 +97,21 @@ spec: selector: quay-component: clairv4-postgres ---- +<1> If left unspecified, defaults to `quay-storageclass`. -. Deploy the postgres database as follows: +. Enter the following command to deploy the Postgres database: + -``` +[source,terminal] +---- $ oc create -f ./clairv4-postgres.yaml -``` +---- -. Create a Clair `config.yaml` file to use for Clair v4. For example: +. Create a `config.yaml` file for Clair, for example: + -.config.yaml [source,yaml] ---- introspection_addr: :8089 -http_listen_addr: :8080 +http_listen_addr: :8081 log_level: debug indexer: connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable @@ -133,18 +144,17 @@ metrics: name: "prometheus" ---- <1> To generate a Clair pre-shared key (PSK), enable `scanning` in the Security Scanner section of the User Interface and click `Generate PSK`. - ++ More information about Clair's configuration format can be found in link:https://quay.github.io/clair/reference/config.html[upstream Clair documentation]. -. Create a secret from the Clair `config.yaml`: +. Enter the following command to create a secret from the Clair `config.yaml` file: + -``` +---- $ oc create secret generic clairv4-config-secret --from-file=./config.yaml -``` +---- -. Create the Clair v4 deployment file (for example, `clair-combo.yaml`) and modify it as necessary: +. Create a deployment file for Clair, for example, `clair-combo.yaml`: + -.clair-combo.yaml [source,yaml,subs="verbatim,attributes"] ---- --- @@ -211,37 +221,45 @@ spec: quay-component: clair-combo type: ClusterIP ---- -<1> Change image to latest clair image name and version. -<2> With the Service set to clairv4, the scanner endpoint for Clair v4 -is entered later into the {productname} config.yaml in the -`SECURITY_SCANNER_V4_ENDPOINT` as `\http://clairv4`. +<1> Use the latest Clair image name and version. +<2> With the `Service` set to `clairv4`, the scanner endpoint for Clair v4 is entered into the {productname} `config.yaml` file in the `SECURITY_SCANNER_V4_ENDPOINT` as `\http://clairv4`. -. Create the Clair v4 deployment as follows: + -``` +---- $ oc create -f ./clair-combo.yaml -``` +---- -. Modify the `config.yaml` file for your {productname} deployment to add the following -entries at the end: +. Add the following entries to your `config.yaml` file for your {productname} deployment: + [source,yaml] ---- FEATURE_SECURITY_NOTIFICATIONS: true FEATURE_SECURITY_SCANNER: true -SECURITY_SCANNER_V4_ENDPOINT: http://clairv4 <1> +SECURITY_SCANNER_V4_ENDPOINT: <1> +SECURITY_SCANNER_V4_PSK: <2> ---- -<1> Identify the Clair v4 service endpoint +<1> Obtained through the {productname} configuration tool. This parameter must be manually added if you do not use the {productname} configuration tool.
+<2> Obtained through the {productname} configuration tool. This parameter must be manually added if you do not use the {productname} configuration tool. -. Redeploy the modified `config.yaml` to the secret containing that file -(for example, `quay-enterprise-config-secret`: +. Enter the following command to delete the original configuration secret for your `quay-enterprise` project: + -``` +[source,terminal] +---- $ oc delete secret quay-enterprise-config-secret -$ oc create secret generic quay-enterprise-config-secret --from-file=./config.yaml -``` +---- -. For the new `config.yaml` to take effect, you need to restart the {productname} pods. Simply deleting the `quay-app` pods causes pods with the updated configuration to be deployed. +. Deploy the modified `config.yaml` to the secret containing that file: ++ +[source,terminal] +---- +$ oc create secret generic quay-enterprise-config-secret --from-file=./config.yaml +---- -At this point, images in any of the organizations identified in the namespace whitelist will be scanned by Clair v4. +. Restart your {productname} pods. ++ +[NOTE] +==== +Deleting the `quay-app` pods causes pods with the updated configuration to be deployed. +==== \ No newline at end of file diff --git a/modules/clair-openshift.adoc b/modules/clair-openshift.adoc index f113bfae7..cd4cb3f17 100644 --- a/modules/clair-openshift.adoc +++ b/modules/clair-openshift.adoc @@ -1,5 +1,9 @@ -[[clair-openshift]] -= Setting Up Clair on a {productname} OpenShift deployment +// Module included in the following assemblies: +// +// clair/master.adoc -== Deploying Via the Quay Operator -To set up Clair V4 on a new {productname} deployment on OpenShift, it is highly recommended to use the Quay Operator. By default, the Quay Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair security scanning automatically. +:_content-type: CONCEPT +[id="clair-quay-operator-overview"] += Clair on {ocp} + +To set up Clair v4 (Clair) on a {productname} deployment on {ocp}, it is recommended to use the {productname} Operator. By default, the {productname} Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically. diff --git a/modules/clair-standalone-config-location.adoc b/modules/clair-standalone-config-location.adoc index 4e6b9b8ea..18dc6e628 100644 --- a/modules/clair-standalone-config-location.adoc +++ b/modules/clair-standalone-config-location.adoc @@ -1,13 +1,47 @@ -[[clair-standalone-config-location]] -= Standalone Clair config +// Module included in the following assemblies: +// +// clair/master.adoc -For standalone Clair deployments, the config file is the one specified in CLAIR_CONF environment variable in the `podman run` command, for example: +:_content-type: PROCEDURE +[id="clair-standalone-config-location"] += Deploying a self-managed Clair container for disconnected {ocp} clusters +Use the following procedure to deploy a self-managed Clair container for disconnected {ocp} clusters. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. + +.Procedure + +. Create a folder for your Clair configuration file, for example: ++ +[source,terminal] +---- +$ mkdir /etc/clairv4/config/ +---- + +. Create a Clair configuration file with the `disable_updaters` parameter set to `true`, for example: ++ +[source,yaml] +---- +--- +indexer: + airgap: true +--- +matcher: + disable_updaters: true +--- +---- + +. 
Start Clair by using the container image, mounting in the configuration from the file you created: ++ [subs="verbatim,attributes"] -.... -sudo podman run -d --rm --name clairv4 \ - -p 8081:8081 -p 8089:8089 \ - -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=combo \ - -v /etc/clairv4/config:/clair:Z \ - {productrepo}/{clairimage}:{productminv} -.... +---- +$ sudo podman run -it --rm --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +-v /etc/clairv4/config:/clair:Z \ +{productrepo}/{clairimage}:{productminv} +---- diff --git a/modules/clair-standalone-configure.adoc b/modules/clair-standalone-configure.adoc new file mode 100644 index 000000000..0ed9ab074 --- /dev/null +++ b/modules/clair-standalone-configure.adoc @@ -0,0 +1,159 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-standalone-configure"] += Setting up Clair on standalone {productname} deployments + +For standalone {productname} deployments, you can set up Clair manually. + +.Procedure + +. In your {productname} installation directory, create a new directory for the Clair database data: ++ +[source,terminal] +---- +$ mkdir /home//quay-poc/postgres-clairv4 +---- + +. Set the appropriate permissions for the `postgres-clairv4` directory by entering the following command: ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /home//quay-poc/postgres-clairv4 +---- + +. Deploy a Clair Postgres database by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --name postgresql-clairv4 \ + -e POSTGRESQL_USER=clairuser \ + -e POSTGRESQL_PASSWORD=clairpass \ + -e POSTGRESQL_DATABASE=clair \ + -e POSTGRESQL_ADMIN_PASSWORD=adminpass \ + -p 5433:5432 \ + -v /home//quay-poc/postgres-clairv4:/var/lib/pgsql/data:Z \ + registry.redhat.io/rhel8/postgresql-13:1-109 +---- + +. Install the Postgres `uuid-ossp` module for your Clair deployment: ++ +[source,terminal] +---- +$ podman exec -it postgresql-clairv4 /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"" | psql -d clair -U postgres' +---- ++ +.Example output +[source,terminal] +---- +CREATE EXTENSION +---- ++ +[NOTE] +==== +Clair requires the `uuid-ossp` extension to be added to its Postgres database. If the user has the proper privileges, Clair creates the extension automatically. If the user does not have the proper privileges, the extension must be added before starting Clair. + +If the extension is not present, the following error will be displayed when Clair attempts to start: `ERROR: Please load the "uuid-ossp" extension. (SQLSTATE 42501)`. +==== + +. Stop the `Quay` container if it is running and restart it in configuration mode, loading the existing configuration as a volume: ++ +[source,terminal] +---- +$ sudo podman run --rm -it --name quay_config \ + -p 80:8080 -p 443:8443 \ + -v $QUAY/config:/conf/stack:Z \ + registry.redhat.io/quay/quay-rhel8:v3.8.2 config secret +---- + +. Log in to the configuration tool and click *Enable Security Scanning* in the *Security Scanner* section of the UI. + +. Set the HTTP endpoint for Clair using a port that is not already in use on the `quay-server` system, for example, `8081`. + +. Create a pre-shared key (PSK) using the *Generate PSK* button. ++ +.Security Scanner UI +image:poc-quay-scanner-config.png[Security Scanner] + +. Validate and download the `config.yaml` file for {productname}, and then stop the `Quay` container that is running the configuration editor. + +. 
Extract the new configuration bundle into your {productname} installation directory, for example: ++ +[source,terminal] +---- +$ tar xvf quay-config.tar.gz -C /home//quay-poc/ +---- + +. Create a folder for your Clair configuration file, for example: ++ +[source,terminal] +---- +$ mkdir /etc/opt/clairv4/config/ +---- + +. Change into the Clair configuration folder: ++ +[source,terminal] +---- +$ cd /etc/opt/clairv4/config/ +---- + +. Create a Clair configuration file, for example: ++ +[source,yaml] +---- +http_listen_addr: :8081 +introspection_addr: :8088 +log_level: debug +indexer: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + scanlock_retry: 10 + layer_scan_concurrency: 5 + migrations: true +matcher: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + max_conn_pool: 100 + run: "" + migrations: true + indexer_addr: clair-indexer +notifier: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + delivery_interval: 1m + poll_interval: 5m + migrations: true +auth: + psk: + key: "MTU5YzA4Y2ZkNzJoMQ==" + iss: ["quay"] +# tracing and metrics +trace: + name: "jaeger" + probability: 1 + jaeger: + agent_endpoint: "localhost:6831" + service_name: "clair" +metrics: + name: "prometheus" +---- ++ +For more information about Clair's configuration format, see link:https://quay.github.io/clair/reference/config.html[Clair configuration reference]. + +. Start Clair by using the container image, mounting in the configuration from the file you created: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +-v /etc/opt/clairv4/config:/clair:Z \ +{productrepo}/{clairimage}:{productminv} +---- ++ +[NOTE] +==== +Running multiple Clair containers is also possible, but for deployment scenarios beyond a single container the use of a container orchestrator like Kubernetes or {ocp} is strongly recommended. +==== + diff --git a/modules/clair-standalone-database.adoc b/modules/clair-standalone-database.adoc index 282f6f355..688a16c63 100644 --- a/modules/clair-standalone-database.adoc +++ b/modules/clair-standalone-database.adoc @@ -22,7 +22,7 @@ $ sudo podman run -d --rm --name postgresql-clairv4 \ -e POSTGRESQL_ADMIN_PASSWORD=adminpass \ -p 5433:5432 \ -v $QUAY/postgres-clairv4:/var/lib/pgsql/data:Z \ - registry.redhat.io/rhel8/postgresql-10:1 + {postgresimage} .... . Ensure that the Postgres `uuid-ossp` module is installed, as it is required by Clair: + diff --git a/modules/clair-standalone-quay-config.adoc b/modules/clair-standalone-quay-config.adoc index 20f50a185..db2e55355 100644 --- a/modules/clair-standalone-quay-config.adoc +++ b/modules/clair-standalone-quay-config.adoc @@ -4,12 +4,12 @@ . Stop the `Quay` container if it is running, and restart it in configuration mode, loading the existing configuration as a volume: + [subs="verbatim,attributes"] -.... -$ sudo podman run --rm -it --name quay_config \ +---- +$ sudo podman run --rm -it --name quay_config \ -p 80:8080 -p 443:8443 \ -v $QUAY/config:/conf/stack:Z \ {productrepo}/{quayimage}:{productminv} config secret -.... +---- . Log in to the configuration tool and enable scanning in the Security Scanner section of the UI. Set the HTTP endpoint for Clair using a port that is not already in use on the `quay-server` system, for example `8081`.
Create a Clair pre-shared key (PSK) using the `Generate PSK` button, for example: + @@ -29,13 +29,12 @@ $ cd $QUAY/config $ tar xvf quay-config.tar.gz .... -The Quay configuration file is now updated to contain the following fields for the security scanner: +The {productname} configuration file is now updated to contain the following fields for the security scanner: -.$QUAY/config/config.yaml [source,yaml] ---- ... -FEATURE_SECURITY_NOTIFICATIONS: false +FEATURE_SECURITY_NOTIFICATIONS: true FEATURE_SECURITY_SCANNER: true ... SECURITY_SCANNER_INDEXING_INTERVAL: 30 diff --git a/modules/clair-standalone-running.adoc b/modules/clair-standalone-running.adoc index 59dd48339..d348be786 100644 --- a/modules/clair-standalone-running.adoc +++ b/modules/clair-standalone-running.adoc @@ -9,17 +9,17 @@ $ sudo podman run -d --rm --name clairv4 \ -p 8081:8081 -p 8089:8089 \ -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=combo \ -v /etc/clairv4/config:/clair:Z \ - {productrepo}/{clairimage}:{productminv} + {productrepo}/{clairimage}:{productminv} .... . Next, restart the `Quay` container using the updated configuration file containing the scanner settings: + [subs="verbatim,attributes"] -.... +---- $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ --name=quay \ -v $QUAY/config:/conf/stack:Z \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} -.... +---- diff --git a/modules/clair-standalone-using.adoc b/modules/clair-standalone-using.adoc index 6d39b5b44..f3705ffc8 100644 --- a/modules/clair-standalone-using.adoc +++ b/modules/clair-standalone-using.adoc @@ -21,8 +21,8 @@ $ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/ubuntu:2 The results from the security scanning can be seen in the Quay UI, as shown in the following images: -.Scanning summary +.Report summary image:poc-clair-1.png[Scanning summary] -.Scanning details +.Report details image:poc-clair-2.png[Scanning details] \ No newline at end of file diff --git a/modules/clair-standalone.adoc b/modules/clair-standalone.adoc deleted file mode 100644 index 12aa4288a..000000000 --- a/modules/clair-standalone.adoc +++ /dev/null @@ -1,60 +0,0 @@ -[[clair-standalone]] -= Setting up Clair on a non-OpenShift {productname} deployment - -For {productname} deployments not running on OpenShift, it is possible to configure Clair security scanning manually. {productname} deployments already running Clair V2 can use the instructions below to add Clair V4 to their deployment. - -. Deploy a (preferably fault-tolerant) Postgres database server. Note that Clair requires the `uuid-ossp` extension to be added to its Postgres database. If the user supplied in Clair's `config.yaml` has the necessary privileges to create the extension then it will be added automatically by Clair itself. If not, then the extension must be added before starting Clair. If the extension is not present, the following error will be displayed when Clair attempts to start. -+ -``` -ERROR: Please load the "uuid-ossp" extension. (SQLSTATE 42501) -``` -+ -. Create a Clair config file in a specific folder, for example, `/etc/clairv4/config/config.yaml`). 
-+ -.config.yaml -[source,yaml] ----- -introspection_addr: :8089 -http_listen_addr: :8080 -log_level: debug -indexer: - connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable - scanlock_retry: 10 - layer_scan_concurrency: 5 - migrations: true -matcher: - connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable - max_conn_pool: 100 - run: "" - migrations: true - indexer_addr: clair-indexer -notifier: -  connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable -  delivery_interval: 1m -  poll_interval: 5m -  migrations: true - -# tracing and metrics -trace: - name: "jaeger" - probability: 1 - jaeger: - agent_endpoint: "localhost:6831" - service_name: "clair" -metrics: - name: "prometheus" ----- - -More information about Clair's configuration format can be found in link:https://quay.github.io/clair/reference/config.html[upstream Clair documentation]. - -. Run Clair via the container image, mounting in the configuration from the file you created. -+ -[subs="verbatim,attributes"] -``` -$ podman run -p 8080:8080 -p 8089:8089 -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=combo -v /etc/clair4/config:/clair -d {productrepo}/{clairimage}:{productminv} -``` - -. Follow the remaining instructions from the previous section for configuring {productname} to use the new Clair V4 endpoint. - -Running multiple Clair containers in this fashion is also possible, but for deployment scenarios beyond a single container the use of a container orchestrator like Kubernetes or OpenShift is strongly recommended. - diff --git a/modules/clair-testing.adoc b/modules/clair-testing.adoc new file mode 100644 index 000000000..c1d79ca6f --- /dev/null +++ b/modules/clair-testing.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-testing"] += Testing Clair + +Use the following procedure to test Clair on either a standalone {productname} deployment, or on an {ocp} Operator-based deployment. + +.Prerequisites + +* You have deployed the Clair container image. + +.Procedure + +. Pull a sample image by entering the following command: ++ +[source,terminal] +---- +$ podman pull ubuntu:20.04 +---- + +. Tag the image to your registry by entering the following command: ++ +[source,terminal] +---- +$ sudo podman tag docker.io/library/ubuntu:20.04 //ubuntu:20.04 +---- + +. Push the image to your {productname} registry by entering the following command: ++ +[source,terminal] +---- +$ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/ubuntu:20.04 +---- + +. Log in to your {productname} deployment through the UI. + +. Click the repository name, for example, *quayadmin/ubuntu*. + +. In the navigation pane, click *Tags*. ++ +.Report summary +image:clair-reposcan.png[Security scan information appears for scanned repository images] + +. Click the image report, for example, *45 medium*, to show a more detailed report: ++ +.Report details +image:clair-vulnerabilities.png[See all vulnerabilities or only those that are fixable] ++ +[NOTE] +==== +In some cases, Clair shows duplicate reports on images, for example, `ubi8/nodejs-12` or `ubi8/nodejs-16`. This occurs because vulnerabilities with same name are for different packages. This behavior is expected with Clair vulnerability reporting and will not be addressed as a bug. 
+==== \ No newline at end of file diff --git a/modules/clair-unmanaged.adoc b/modules/clair-unmanaged.adoc index c60a1629b..dadda3417 100644 --- a/modules/clair-unmanaged.adoc +++ b/modules/clair-unmanaged.adoc @@ -1,91 +1,11 @@ -[[clair-unmanaged]] -= Unmanaged Clair database +// Module included in the following assemblies: +// +// clair/master.adoc -With {productname} 3.7, users can provide a custom Clair configuration for an unmanaged Clair database on the {productname} OpenShift Container Platform Operator. An unmanaged Clair database allows the {productname} Operator to work in a geo-replicated environment, where multiple instances of the Operator must communicate with the same database. An unmanaged Clair database can also be used when a user requires a highly-available (HA) Clair database that exists outside of a cluster. +:_content-type: CONCEPT +[id="unmanaged-clair-configuration"] += Unmanaged Clair configuration -== Configuring an unmanaged Clair database +{productname} users can run an unmanaged Clair configuration with the {productname} {ocp} Operator. This feature allows users to create an unmanaged Clair database, or run their custom Clair configuration without an unmanaged database. -The {productname} Operator for OpenShift Container Platform allows users to provide their own Clair configuration by editing the `configBundleSecret` parameter. - -. In the Quay Operator, set the `clairpostgres` component of the QuayRegistry custom resource to `unmanaged`: -+ -[source,yaml] ----- -apiVersion: quay.redhat.com/v1 -kind: QuayRegistry -metadata: - name: quay370 -spec: - configBundleSecret: config-bundle-secret - components: - - kind: objectstorage - managed: false - - kind: route - managed: true - - kind: tls - managed: false - - kind: clairpostgres - managed: false ----- - -. Create a `clair-config.yaml` bundle secret: -+ -[source,terminal] ----- -$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config-aws-rds-postgres_ca_cert.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret ----- -+ -Example `clair-config.yaml` configuration: -+ -[source,yaml] ----- -auth: - psk: - iss: - - quay - - clairctl - key: -http_listen_addr: :8080 -indexer: - connstring: host=quay-server.example.com port=5432 dbname=quay user=clairuser password=clairpass sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca - migrations: true -log_level: debug -matcher: - connstring: host=quay-server.example.com port=5432 dbname=quay user=clairuser password=clairpass sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca - migrations: true -metrics: - name: prometheus -notifier: - connstring: host=quay-server.example.com port=5432 dbname=quay user=clairuser password=clairpass sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca - migrations: true ----- -+ -[NOTE] -==== -* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`. -* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. -==== - -. Add the `clair-config.yaml` bundle secret to your `configBundleSecret`. 
For example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: config-bundle-secret - namespace: quay-enterprise -data: - config.yaml: - clair-config.yaml: - extra_ca_cert_: - clair-ssl.crt: >- - clair-ssl.key: >- ----- -+ -[NOTE] -==== -When updated, the provided `clair-config.yaml` is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module. -==== - -After proper configuration, the Clair application pod should return to a `Ready` state. +An unmanaged Clair database allows the {productname} Operator to work in a geo-replicated environment, where multiple instances of the Operator must communicate with the same database. An unmanaged Clair database can also be used when a user requires a highly-available (HA) Clair database that exists outside of a cluster. \ No newline at end of file diff --git a/modules/clair-updater-urls.adoc b/modules/clair-updater-urls.adoc index 9e0a99a9a..b7e30e463 100644 --- a/modules/clair-updater-urls.adoc +++ b/modules/clair-updater-urls.adoc @@ -1,7 +1,12 @@ -[[clair-updater-urls]] +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-updater-urls"] = Clair updater URLs -The following are the HTTP hosts and paths that Clair will attempt to talk to in a default configuration. This list is non-exhaustive, as some servers will issue redirects and some request URLs are constructed dynamically. +The following are the HTTP hosts and paths that Clair will attempt to talk to in a default configuration. This list is non-exhaustive. Some servers issue redirects and some request URLs are constructed dynamically. * \https://secdb.alpinelinux.org/ * \http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list diff --git a/modules/clair-updaters.adoc b/modules/clair-updaters.adoc new file mode 100644 index 000000000..ba0daf8f4 --- /dev/null +++ b/modules/clair-updaters.adoc @@ -0,0 +1,95 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-updaters"] += Clair updaters + +Clair uses `Go` packages called _updaters_ that contain the logic of fetching and parsing different vulnerability databases. + +Updaters are usually paired with a matcher to interpret if, and how, any vulnerability is related to a package. Administrators might want to update the vulnerability database less frequently, or not import vulnerabilities from databases that they know will not be used. + +[id="configuring-updaters"] +== Configuring updaters + +Updaters can be configured by the `updaters` key at the top of the configuration. If updaters are being run automatically within the matcher process, which is the default setting, the period for running updaters is configured under the matcher's configuration field. + +[id="updater-sets"] +=== Updater sets + +The following sets can be configured with Clair updaters: + +* `alpine` +* `aws` +* `debian` +* `enricher/cvss` +* `libvuln/driver` +* `oracle` +* `photon` +* `pyupio` +* `rhel` +* `rhel/rhcc` +* `suse` +* `ubuntu` +* `updater` + +[id="selecting-updater-sets"] +=== Selecting updater sets + +Specific sets of updaters can be selected by the `sets` list. For example: + +[source,yaml] +---- +updaters: + sets: + - rhel +---- + +If the `sets` field is not populated, it defaults to using all sets. 
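+More than one set can be listed together. For example, the following illustrative configuration enables both the `rhel` and `ubuntu` updater sets from the list above:
+
+[source,yaml]
+----
+updaters:
+  sets:
+    - rhel
+    - ubuntu
+----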
+ +[id="filtering-updater-sets"] +=== Filtering updater sets + +To reject an updater from running without disabling an entire set, the `filter` option can be used. + +In the following example, the string is interpreted as a regular expression, as implemented by the Go `regexp` package, and any updater with a name that does not match is rejected. + +[NOTE] +==== +An empty filter string matches any updater name. It does not mean that no updater names match. +==== + +[source,yaml] +---- +updaters: + filter: '^$' +---- + +[id="configuring-specific-updaters"] +=== Configuring specific updaters + +Configuration for specific updaters can be passed by putting a key underneath the `config` parameter of the `updaters` object. The name of an updater might be constructed dynamically, and users should examine logs to ensure updater names are accurate. The specific object that an updater expects should be covered in the updater's documentation. + +In the following example, the `rhel` updater fetches a manifest from a different location: + +[source,yaml] +---- +updaters: + config: + rhel: + url: https://example.com/mirror/oval/PULP_MANIFEST +---- + +[id="disabling-clair-updater-component-managed-db"] +=== Disabling the Clair Updater component + +In some scenarios, users might want to disable the Clair updater component. Disabling updaters is required when running {productname} in a disconnected environment. + +In the following example, Clair updaters are disabled: + +[source,yaml] +---- +matcher: + disable_updaters: true +---- \ No newline at end of file diff --git a/modules/clair-using.adoc b/modules/clair-using.adoc index 47cfa9069..334c32450 100644 --- a/modules/clair-using.adoc +++ b/modules/clair-using.adoc @@ -1,6 +1,15 @@ -[[clair-using]] +:_content-type: PROCEDURE +[id="clair-using"] = Using Clair +Use the following procedure to ensure that Clair is working on your {productname} Operator deployment. + +.Prerequisites + +* You have configured Clair for your {ocp} deployment. + +.Procedure + . Log in to your {productname} cluster and select an organization for which you have configured Clair scanning. diff --git a/modules/clair-vulnerability-scanner-hosts.adoc b/modules/clair-vulnerability-scanner-hosts.adoc new file mode 100644 index 000000000..4857eb947 --- /dev/null +++ b/modules/clair-vulnerability-scanner-hosts.adoc @@ -0,0 +1,22 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-vulnerability-scanner-hosts"] += Clair vulnerability databases + +Clair uses the following vulnerability databases to report issues in your images: + +* Ubuntu Oval database +* Debian Oval database +* {rhel} Oval database +* SUSE Oval database +* Oracle Oval database +* Alpine SecDB database +* VMware Photon OS database +* Amazon Web Services (AWS) UpdateInfo +* Pyup.io (Python) database + +For information about how Clair does security mapping with the different databases, see +link:https://quay.github.io/claircore/concepts/severity_mapping.html[ClairCore Severity Mapping].
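+After these databases have been imported, a vulnerability report for an individual image can also be generated from the command line. The following invocation is a sketch only; it assumes that your build of `clairctl` includes the upstream `report` subcommand and that the configuration file from the earlier examples is in use:
+
+[source,terminal]
+----
+$ ./clairctl --config ./config.yaml report ubuntu:20.04
+----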
\ No newline at end of file diff --git a/modules/clair-vulnerability-scanner-overview.adoc b/modules/clair-vulnerability-scanner-overview.adoc new file mode 100644 index 000000000..c83bae9c7 --- /dev/null +++ b/modules/clair-vulnerability-scanner-overview.adoc @@ -0,0 +1,21 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-vulnerability-scanner"] += Clair for {productname} + +Clair v4 (Clair) is an open source application that uses static code analysis to parse image content and report vulnerabilities affecting that content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. + +//// +[NOTE] +==== +ifeval::["{productname}" == "Red Hat Quay"] +With the release of {productname} 3.4, Clair v4 (image {productrepo}/{clairimage} fully replaced Clair v2 (image quay.io/redhat/clair-jwt). See below for how to run Clair v2 in read-only mode while Clair v4 is updating. +endif::[] +ifeval::["{productname}" == "Project Quay"] +With the release of Clair v4 (image clair), the previously used Clair v2 (image clair-jwt) is no longer used. See below for how to run Clair v2 in read-only mode while Clair v4 is updating. +endif::[] +==== +//// \ No newline at end of file diff --git a/modules/clairv2-to-v4.adoc b/modules/clairv2-to-v4.adoc index ac79c2cad..e13c0d4ce 100644 --- a/modules/clairv2-to-v4.adoc +++ b/modules/clairv2-to-v4.adoc @@ -1,7 +1,7 @@ [[clairv2-to-v4]] = Migrating from Clair v2 to Clair v4 -Starting with {productname} 3.4, Clair v4 is used by default. It will also be the only version of Clair continually supported, as older {productname} versions are not supported with Clair v4 in production. Users should continue using Clair v2 if using a version of {productname} earlier than 3.4. +Starting with {productname} 3.4, Clair v4 is used by default. It will also be the only version of Clair continually supported, as older versions of {productname} are not supported with Clair v4 in production. Users should continue using Clair v2 if using a version of {productname} earlier than 3.4. Existing {productname} 3.3 deployments will be upgraded to Clair v4 when managed via the {productname} Operator. Manually upgraded {productname} deployments can install Clair v4 side-by-side, which will cause the following: diff --git a/modules/clairv4-air-gapped.adoc b/modules/clairv4-air-gapped.adoc index 3724a661a..fe0b14c5c 100644 --- a/modules/clairv4-air-gapped.adoc +++ b/modules/clairv4-air-gapped.adoc @@ -1,8 +1,10 @@ [[clairv4-air-gapped]] = Air-gapped Clair v4 -{productname} 3.4 and later and Clair v4 are supported in disconnected environments. By default, Clair v4 will attempt to run automated updates against Red Hat servers. When Clair v4 in network environments is disconnected from the internet: +{productname} 3.4 and later and Clair v4 are supported in disconnected environments. By default, Clair v4 will attempt to run automated updates against Red Hat servers. When Clair v4 runs in a network environment that is disconnected from the internet: -* The Clair v4 auto-update is disabled in the Clair `config` bundle. -* On a system with internet access, the vulnerability database updates is performed manually and exported to a disk. -* The on-disk data is then transferred to the target system with offline media. It is then manually imported.
\ No newline at end of file +* The Clair v4 auto-update is disabled in the Clair `config` bundle. +* On a system with internet access, the vulnerability database updates are performed manually and exported to disk. +* The on-disk data is then transferred to the target system with offline media. It is then manually imported. + +For more information on air-gapped Clair v4 and using `clairctl`, the command line tool, see https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-disconnected-environments[Manually updating the vulnerability databases for Clair in an air-gapped OpenShift cluster]. diff --git a/modules/clairv4-arch.adoc b/modules/clairv4-arch.adoc index f608b352b..5927e469b 100644 --- a/modules/clairv4-arch.adoc +++ b/modules/clairv4-arch.adoc @@ -1,11 +1,11 @@ [[clairv4-arch]] = Clair v4 architecture -Clair v4 utilizes the ClairCore library as its engine for examining contents and reporting vulnerabilities. At a high level you can consider Clair a service wrapper to the functionality provided in the ClairCore library. +Clair v4 utilizes the ClairCore library as its engine for examining contents and reporting vulnerabilities. At a high level, you can consider Clair as a service wrapper to the functionality provided in the ClairCore library. == ClairCore -ClairCore is the engine behind Clair v4's container security solution. The ClairCore package exports our domain models, interfaces necessary to plug into our business logic, and a default set of implementations. This default set of implementations defines our support matrix. +ClairCore is the engine behind Clair v4's container security solution. The ClairCore package exports domain models, interfaces that are necessary to plug into the business logic, and a default set of implementations. This default set of implementations defines the support matrix. ClairCore relies on Postgres for its persistence and the library will handle migrations if configured to do so. diff --git a/modules/clairv4-limitations.adoc b/modules/clairv4-limitations.adoc index 7a63940d7..a8a657120 100644 --- a/modules/clairv4-limitations.adoc +++ b/modules/clairv4-limitations.adoc @@ -9,4 +9,4 @@ The following limitations are currently being addressed by the development team: * Clair v4 does not currently support MSFT Windows images. -* Clair v4 does not currently support slim/scratch container images. \ No newline at end of file +* Clair v4 does not currently support slim or scratch container images. \ No newline at end of file diff --git a/modules/con_quay_ha_prereq.adoc b/modules/con_quay_ha_prereq.adoc index 01f3ffba6..f907dfcb8 100644 --- a/modules/con_quay_ha_prereq.adoc +++ b/modules/con_quay_ha_prereq.adoc @@ -46,7 +46,7 @@ Each system should have the following attributes: //* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.5/x86_64/product-software[Downloads page] and follow instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide] to install RHEL on each system. //** **Valid Red Hat Subscription**: Obtain Red Hat Enterprise Linux server subscriptions and apply one to each system.
-* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux 8 server media from the link:https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.3/x86_64/product-software[Downloads page] and follow the installation instructions available in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/[Product Documentation for Red Hat Enterprise Linux 8]. +* **Red Hat Enterprise Linux (RHEL)** 8: Obtain the latest Red Hat Enterprise Linux 8 server media from the link:https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.3/x86_64/product-software[Downloads page] and follow the installation instructions available in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/[Product Documentation for Red Hat Enterprise Linux 8]. ** **Valid Red Hat Subscription**: Configure a valid Red Hat Enterprise Linux 8 server subscription. ** **CPUs**: Two or more virtual CPUs @@ -55,7 +55,7 @@ Each system should have the following attributes: [NOTE] ==== -Red Hat Enterprise Linux (RHEL) 8 is strongly recommended for highly available, production quality deployments of Red Hat Quay 3.6. RHEL 7 has not been tested with Red Hat Quay 3.6, and will be deprecated in a future release. +Red Hat Enterprise Linux (RHEL) 8 is strongly recommended for highly available, production quality deployments of {productname} 3.7. RHEL 7 has not been tested with {productname} 3.7, and will be deprecated in a future release. ==== [[using-podman]] @@ -65,14 +65,14 @@ This document uses podman for creating and deploying containers. If you do not h [NOTE] ==== -Podman is strongly recommended for highly available, production quality deployments of Red Hat Quay 3.6. Docker has not been tested with Red Hat Quay 3.6, and will be deprecated in a future release. +Podman is strongly recommended for highly available, production quality deployments of {productname} 3.7. Docker has not been tested with {productname} 3.7, and will be deprecated in a future release. 
==== //// == Restarting containers -Because the `--restart` option is not fully supported by podman, instead of using `--restart`, you could configure `podman` as a systemd service, as described +Because the `--restart` option is not fully supported by podman, instead of using `--restart`, you could configure `podman` as a systemd service, as described in https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#porting-containers-to-systemd-using-podman_building-running-and-managing-containers[Porting containers to systemd using Podman] //// diff --git a/modules/con_quay_intro.adoc b/modules/con_quay_intro.adoc index a2ef57cc1..91f3524ca 100644 --- a/modules/con_quay_intro.adoc +++ b/modules/con_quay_intro.adoc @@ -1,18 +1,20 @@ +:_content-type: CONCEPT +[id="poc-overview"] = Overview -Features of {productname} include: +{productname} includes the following features: * High availability * Geo-replication -* Repository mirroring -* Docker v2, schema 2 (multiarch) support +* Repository mirroring +* Docker v2, schema 2 (multi-arch) support * Continuous integration * Security scanning with Clair * Custom log rotation * Zero downtime garbage collection * 24/7 support -{productname} provides support for: +{productname} provides support for the following: * Multiple authentication and access methods * Multiple storage backends @@ -20,23 +22,29 @@ Features of {productname} include: * Application registries * Different container image types +[id="poc-architecture"] == Architecture -{productname} consists of a number of core components, both internal and external. - +{productname} includes several core components, both internal and external. +[id="poc-internal-components"] === Internal components -* **Quay (container registry)**: Runs the `Quay` container as a service, consisting of several components in the pod. -* **Clair**: Scans container images for vulnerabilities and suggests fixes. +{productname} includes the following internal components: + +* **Quay (container registry)**. Runs the `Quay` container as a service, consisting of several components in the pod. +* **Clair**. Scans container images for vulnerabilities and suggests fixes. +[id="poc-external-components"] === External components -* **Database**: Used by {productname} as its primary metadata storage. Note that this is not for image storage. -* **Redis (key-value store)**: Stores live builder logs and the {productname} tutorial. -* **Cloud storage**:For supported deployments, you need to use one of the following types of storage: -** **Public cloud storage**: In public cloud environments, you should use the cloud provider's object storage, such as Amazon Web Services's Amazon S3 or Google Cloud's Google Cloud Storage. -** **Private cloud storage**: In private clouds, an S3 or Swift compliant Object Store is needed, such as Ceph RADOS, or OpenStack Swift. +{productname} includes the following external components: + +* **Database**. Used by {productname} as its primary metadata storage. Note that this is not for image storage. +* **Redis (key-value store)**. Stores live builder logs and the {productname} tutorial. Also includes the locking mechanism that is required for garbage collection. +* **Cloud storage**. For supported deployments, one of the following storage types must be used: +** **Public cloud storage**. 
In public cloud environments, you should use the cloud provider's object storage, such as Amazon S3 from Amazon Web Services (AWS) or Google Cloud Storage from Google Cloud. +** **Private cloud storage**. In private clouds, an S3 or Swift compliant Object Store is needed, such as Ceph RADOS or OpenStack Swift. [WARNING] ==== diff --git a/modules/con_quay_single_prereq.adoc b/modules/con_quay_single_prereq.adoc index c2014b6de..bf8279148 100644 --- a/modules/con_quay_single_prereq.adoc +++ b/modules/con_quay_single_prereq.adoc @@ -1,29 +1,39 @@ +:_content-type: CONCEPT +[id="poc-prerequisites"] = Prerequisites ifeval::["{productname}" == "Red Hat Quay"] //* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux 7 server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.9/x86_64/product-software[Downloads page] and follow the installation instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide]. -* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux 8 server media from the link:https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.3/x86_64/product-software[Downloads page] and follow the installation instructions available in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/[Product Documentation for Red Hat Enterprise Linux 8]. -* **Valid Red Hat Subscription**: Configure a valid Red Hat Enterprise Linux 8 server subscription. +* Red Hat Enterprise Linux (RHEL) 8 +** To obtain the latest version of {rhel} 8, see link:https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.3/x86_64/product-software[Download Red Hat Enterprise Linux]. +** For installation instructions, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/[Product Documentation for Red Hat Enterprise Linux 8]. +* An active subscription to Red Hat endif::[] -* **CPUs**: Two or more virtual CPUs. -* **RAM**: 4GB or more. -* **Disk space**: The required disk space depends on the storage needs for the registry. Approximately 30GB of disk space should be enough for a test system, broken down as follows: -** At least 10GB of disk space for the Red Hat Enterprise Linux operating system. -** At least 10GB of disk space for docker storage (to run 3 containers). -** At least 10GB of disk space for Quay local storage. Note that CEPH or other local storage might require more memory. - +* Two or more virtual CPUs +* 4 GB or more of RAM +* Approximately 30 GB of disk space on your test system, which can be broken down as follows: +** Approximately 10 GB of disk space for the {rhel} operating system. +** Approximately 10 GB of disk space for Docker storage for running three containers. +** Approximately 10 GB of disk space for {productname} local storage. ++ +[NOTE] +==== +CEPH or other local storage might require more memory. +==== ++ More information on sizing can be found at link:https://access.redhat.com/articles/5177961[Quay 3.x Sizing Guidelines]. [NOTE] ==== -Red Hat Enterprise Linux (RHEL) 8 is strongly recommended for highly available, production quality deployments of Red Hat Quay 3.6. +Red Hat Enterprise Linux (RHEL) 8 is recommended for highly available, production quality deployments of {productname} {producty}.
RHEL 7 has not been tested with {productname} {producty}, and will be deprecated in a future release. ==== +[id="poc-using-podman"] == Using Podman -This document uses Podman for creating and deploying containers. If you do not have Podman installed on your system, you should be able to use the equivalent Docker commands. For more information on Podman and related technologies, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index[Building, running, and managing Linux containers on Red Hat Enterprise Linux 8]. +This document uses Podman for creating and deploying containers. For more information on Podman and related technologies, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index[Building, running, and managing Linux containers on Red Hat Enterprise Linux 8]. -[NOTE] -==== -Podman is strongly recommended for highly available, production quality deployments of Red Hat Quay 3.6. Docker has not been tested with Red Hat Quay 3.6, and will be deprecated in a future release. +[IMPORTANT] ==== +If you do not have Podman installed on your system, the use of equivalent Docker commands might be possible, however this is not recommended. Docker has not been tested with {productname} {producty}, and will be deprecated in a future release. Podman is recommended for highly available, production quality deployments of {productname} {producty}. +==== \ No newline at end of file diff --git a/modules/con_schema.adoc b/modules/con_schema.adoc index 30eb82529..ba0ab9b91 100644 --- a/modules/con_schema.adoc +++ b/modules/con_schema.adoc @@ -5,7 +5,7 @@ Most {productname} configuration information is stored in the `config.yaml` file using the browser-based config tool when {productname} is first deployed. -// TODO 36 Add link to standalone config guide +// TODO 36 Add link to standalone config guide // https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/ The configuration options are described in the {productname} Configuration Guide. @@ -164,7 +164,7 @@ azureStorage: storage_path: /datastorage/registry ``` -** **Google Cloud Storage**: +** **Google Cloud Storage**: + ``` googleCloudStorage: @@ -188,7 +188,7 @@ swiftStorage: ca_cert_path: /conf/stack/swift.cert" storage_path: /datastorage/registry ``` - +* **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** [string]: The quota size to apply to all organizations and users. * **DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS** [array]: The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose images should be fully replicated, by default, to all other storage engines. ** **Min Items**: None ** **Example**: `s3_us_east, s3_us_west` @@ -254,10 +254,14 @@ swiftStorage: ** **Example**: `True` * **FEATURE_PERMANENT_SESSIONS** [boolean]: Whether sessions are permanent. Defaults to True. ** **Example**: `True` +* **FEATURE_PROXY_CACHE** [boolean]: Whether to enable proxy caching for {productname}. +** **Example**: `True` * **FEATURE_PROXY_STORAGE** [boolean]: Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False. ** **Example**: `False` * **FEATURE_PUBLIC_CATALOG** [boolean]: If set to true, the `_catalog` endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False. 
** **Example**: `False` +* **FEATURE_QUOTA_MANAGEMENT** [boolean]: If set to true, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. +** **Example**: `True` * **FEATURE_RATE_LIMITS** [boolean]: Whether to enable rate limits on API and registry endpoints. Defaults to False. ** **Example**: `False` * **FEATURE_READER_BUILD_LOGS** [boolean]: If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False. @@ -368,7 +372,7 @@ swiftStorage: * **LDAP_BASE_DN** [string]: The base DN for LDAP authentication. * **LDAP_EMAIL_ATTR** [string]: The email attribute for LDAP authentication. * **LDAP_UID_ATTR** [string]: The uid attribute for LDAP authentication. -* **LDAP_URI** [string]: The LDAP URI. +* **LDAP_URI** [string]: The LDAP URI. * **LDAP_USER_FILTER** [string]: The user filter for LDAP authentication. * **LDAP_USER_RDN** [array]: The user RDN for LDAP authentication. * **LOGS_MODEL** [string]: Logs model for action logs. @@ -439,7 +443,7 @@ swiftStorage: * **MAXIMUM_LAYER_SIZE** [string]: Maximum allowed size of an image layer. Defaults to 20G. ** **Pattern**: ``^[0-9]+(G|M)$`` ** **Example**: `100G` -* **PREFERRED_URL_SCHEME** [string]: The URL scheme to use when hitting +* **PREFERRED_URL_SCHEME** [string]: The URL scheme to use when hitting {productname}. If {productname} is behind SSL *at all*, this *must* be `https` ** **enum**: `http` or `https` ** **Example**: `https` @@ -480,7 +484,7 @@ swiftStorage: * **SECURITY_SCANNER_V4_ENDPOINT** [string]: The endpoint for the V4 security scanner. ** **Pattern**: ``^http(s)?://(.)+$`` ** **Example**: `http://192.168.99.101:6060` -* **SECURITY_SCANNER_V4_PSK** [string]: The generated pre-shared key (PSK) for Clair. +* **SECURITY_SCANNER_V4_PSK** [string]: The generated pre-shared key (PSK) for Clair. * **SERVER_HOSTNAME** [string] required: The URL at which {productname} is accessible, without the scheme. ** **Example**: `quay.io` * **SESSION_COOKIE_SECURE** [boolean]: Whether the `secure` property should be set on session cookies. Defaults to False. Recommended to be True for all installations using SSL. diff --git a/modules/conc_quay-bridge-operator.adoc b/modules/conc_quay-bridge-operator.adoc new file mode 100644 index 000000000..876b7700b --- /dev/null +++ b/modules/conc_quay-bridge-operator.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[[quay-bridge-operator]] += Integrating {productname} into {ocp} with the {qbo} + +Using the {qbo}, you can replace the integrated container registry in {ocp} with a {productname} registry. By doing this, your integrated {ocp} registry becomes a highly available, enterprise-grade {productname} registry with enhanced role-based access control (RBAC) features. + +The primary goal of the {qbo} is to duplicate the features of the integrated {ocp} registry in the new {productname} registry. The features enabled with the {qbo} include: + +* Synchronizing {ocp} namespaces as {productname} organizations. +* Creating robot accounts for each default namespace service account. +* Creating secrets for each created robot account, and associating each robot secret with a service account as `Mountable` and `Image Pull Secret`. +* Synchronizing {ocp} image streams as {productname} repositories. +* Automatically rewriting new builds that use image streams so that they output to {productname}. +* Automatically importing an image stream tag once a build completes.
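For illustration, the integration itself is driven by a `QuayIntegration` custom resource. The following is a minimal sketch only; the hostname, secret name, and namespace are assumed example values, and the exact specification depends on the installed {qbo} version:

[source,yaml]
----
apiVersion: quay.redhat.com/v1
kind: QuayIntegration
metadata:
  name: example-quayintegration
spec:
  clusterID: openshift                            # unique identifier for this cluster
  credentialsSecret:
    namespace: openshift-operators                # assumed namespace of the Quay OAuth token secret
    name: quay-integration                        # assumed secret name
  quayHostname: https://quay-server.example.com   # assumed {productname} registry hostname
  insecureRegistry: false
----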
+ +By using the following procedures, you will enable bi-directional communication between your {productname} and {ocp} clusters. diff --git a/modules/config-api-default.adoc b/modules/config-api-default.adoc index 4651e4faf..6acb11abd 100644 --- a/modules/config-api-default.adoc +++ b/modules/config-api-default.adoc @@ -1,13 +1,13 @@ -= Retrieving the default configuration += Retrieving the default configuration If you are running the configuration tool for the first time and do not have an existing configuration, you can retrieve the default configuration. Start the container in config mode: [subs="verbatim,attributes"] -.... -$ sudo podman run --rm -it --name quay_config \ +---- +$ sudo podman run --rm -it --name quay_config \ -p 8080:8080 \ {productrepo}/{quayimage}:{productminv} config secret -.... +---- Use the `config` endpoint of the configuration API to get the default: diff --git a/modules/config-api-intro.adoc b/modules/config-api-intro.adoc index c86f8e0d7..c11c2a609 100644 --- a/modules/config-api-intro.adoc +++ b/modules/config-api-intro.adoc @@ -1,3 +1,5 @@ +:_content-type: CONCEPT +[id="config-using-api"] = Using the configuration API The configuration tool exposes four endpoints that can be used to build, validate, bundle and deploy a configuration. The config-tool API is documented at link:https://github.com/quay/config-tool/blob/master/pkg/lib/editor/API.md[]. In this section, you will see how to use the API to retrieve the current configuration and how to validate any changes you make. \ No newline at end of file diff --git a/modules/config-api-retrieve.adoc b/modules/config-api-retrieve.adoc index 245ae5f69..85e505995 100644 --- a/modules/config-api-retrieve.adoc +++ b/modules/config-api-retrieve.adoc @@ -3,12 +3,12 @@ If you have already configured and deployed the Quay registry, stop the container and restart it in configuration mode, loading the existing configuration as a volume: [subs="verbatim,attributes"] -.... -$ sudo podman run --rm -it --name quay_config \ +---- +$ sudo podman run --rm -it --name quay_config \ -p 8080:8080 \ -v $QUAY/config:/conf/stack:Z \ {productrepo}/{quayimage}:{productminv} config secret -....
+---- Use the `config` endpoint of the API to get the current configuration: diff --git a/modules/config-fields-aci.adoc b/modules/config-fields-aci.adoc index 5b8a4d2d9..718e61fe2 100644 --- a/modules/config-fields-aci.adoc +++ b/modules/config-fields-aci.adoc @@ -1,15 +1,14 @@ [[config-fields-aci]] -= ACI configuration += ACI configuration fields -.ACI configuration +.ACI configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **FEATURE_ACI_CONVERSION** | Boolean | Whether to enable conversion to ACIs + + **Default:** False -| {nbsp} | {nbsp} | {nbsp} | **GPG2_PRIVATE_KEY_FILENAME** | String | The filename of the private key used to decrypt ACIs | **GPG2_PRIVATE_KEY_NAME** | String | The name of the private key used to sign ACIs | **GPG2_PUBLIC_KEY_FILENAME** | String | The filename of the public key used to encrypt ACIs diff --git a/modules/config-fields-actionlog.adoc b/modules/config-fields-actionlog.adoc index 8a31ad7f2..80e934a80 100644 --- a/modules/config-fields-actionlog.adoc +++ b/modules/config-fields-actionlog.adoc @@ -10,7 +10,6 @@ | **FEATURE_LOG_EXPORT** | Boolean | Whether to allow exporting of action logs + + **Default:** `True` -| {nbsp} | {nbsp} | {nbsp} | **LOGS_MODEL** | String | The logs model for action logs + + **Values:** One of `database`, `transition_reads_both_writes_es`, `elasticsearch` + @@ -76,7 +75,7 @@ | **FEATURE_ACTION_LOG_ROTATION** | Boolean | Enabling log rotation and archival will move all logs older than 30 days to storage + + **Default:** `false` -| {nbsp} | {nbsp} | {nbsp} + | **ACTION_LOG_ARCHIVE_LOCATION** | String | If action log archiving is enabled, the storage engine in which to place the archived data + + **Example:** `s3_us_east` diff --git a/modules/config-fields-app-tokens.adoc b/modules/config-fields-app-tokens.adoc index 88a03da96..aa158f181 100644 --- a/modules/config-fields-app-tokens.adoc +++ b/modules/config-fields-app-tokens.adoc @@ -1,14 +1,13 @@ [[config-fields-app-tokens]] -= App tokens += App tokens configuration fields -.App tokens configuration +.App tokens configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **FEATURE_APP_SPECIFIC_TOKENS** | Boolean | If enabled, users can create tokens for use by the Docker CLI + + **Default:** True -| {nbsp} | {nbsp} | {nbsp} | **APP_SPECIFIC_TOKEN_EXPIRATION** | String | The expiration for external app tokens. + + **Default:** None + diff --git a/modules/config-fields-basic.adoc b/modules/config-fields-basic.adoc index 219316323..02b490bfd 100644 --- a/modules/config-fields-basic.adoc +++ b/modules/config-fields-basic.adoc @@ -1,63 +1,44 @@ [[config-fields-basic]] -= Basic configuration - - - += Basic configuration fields .Basic configuration [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **REGISTRY_TITLE** | String | If specified, the long-form title for the registry + - + -**Default:** + -`Quay Enterprise` -| **REGISTRY_TITLE_SHORT** | String | If specified, the short-form title for the registry. + - + -**Default:** + -`Quay Enterprise` -| {nbsp} | {nbsp} |{nbsp} -| **BRANDING** | Object | Custom branding for logos and URLs in the {productname} UI. +| **REGISTRY_TITLE** | String | If specified, the long-form title for the registry. Displayed on the frontend of your {productname} deployment, for example, at the sign-in page of your organization. Should not exceed 35 characters.
+ + +**Default:** + +`Red Hat Quay` +| **REGISTRY_TITLE_SHORT** | String | If specified, the short-form title for the registry. Title is displayed on various pages of your organization, for example, as the title of the tutorial on your organization's *Tutorial* page. + + +**Default:** + +`Red Hat Quay` -| {nbsp}{nbsp}{nbsp}**.logo** + -(Required) | String | Main logo image URL + - + -**Example:** + -`/static/img/quay-horizontal-color.svg` -| {nbsp}{nbsp}{nbsp}**.footer_img** | String | Logo for UI footer + - + -**Example:** + -`/static/img/RedHat.svg` -| {nbsp}{nbsp}{nbsp}**.footer_url** | String | Link for footer image + - + -**Example:** + -`https://redhat.com` -| {nbsp} | {nbsp} |{nbsp} | **CONTACT_INFO** | Array of String | If specified, contact information to display on the contact page. If only a single piece of contact information is specified, the contact footer will link directly. -|{nbsp}{nbsp}{nbsp}**[0]** | String | Adds a link to send an e-mail + - + -**Pattern:** + -`^mailto:(.)+$` + -**Example:** + +|**[0]** | String | Adds a link to send an e-mail. + + + +**Pattern:** + +`^mailto:(.)+$` + +**Example:** + `mailto:support@quay.io` -|{nbsp}{nbsp}{nbsp}**[1]** | String | Adds a link to visit an IRC chat room + - + -**Pattern:** + -`^irc://(.)+$` + -**Example:** + +|**[1]** | String | Adds a link to visit an IRC chat room. + + + +**Pattern:** + +`^irc://(.)+$` + +**Example:** + `irc://chat.freenode.net:6665/quay` -|{nbsp}{nbsp}{nbsp}**[2]** | String | Adds a link to call a phone number+ - + -**Pattern:** + -`^tel:(.)+$` + -**Example:** + +|**[2]** | String | Adds a link to call a phone number. + + + +**Pattern:** + +`^tel:(.)+$` + +**Example:** + `tel:+1-888-930-3475` -|{nbsp}{nbsp}{nbsp}**[3]** | String |Adds a link to a defined URL + - + -**Pattern:** + -`^http(s)?://(.)+$` + -**Example:** + +|**[3]** | String | Adds a link to a defined URL. + + + +**Pattern:** + +`^http(s)?://(.)+$` + +**Example:** + `https://twitter.com/quayio` |=== \ No newline at end of file diff --git a/modules/config-fields-branding.adoc b/modules/config-fields-branding.adoc new file mode 100644 index 000000000..9ddd89623 --- /dev/null +++ b/modules/config-fields-branding.adoc @@ -0,0 +1,38 @@ +:_content-type: CONCEPT +[id="config-fields-branding"] += Branding configuration fields + +.Branding configuration fields +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **BRANDING** | Object | Custom branding for logos and URLs in the {productname} UI. + +| **.logo** + +(Required) | String | Main logo image URL. + + +The header logo defaults to 205x30 px. The form logo on the {productname} sign-in screen of the web UI defaults to 356.5x39.7 px. + + +**Example:** + +`/static/img/quay-horizontal-color.svg` +| **.footer_img** | String | Logo for UI footer. Defaults to 144x34 px. + + + +**Example:** + +`/static/img/RedHat.svg` +| **.footer_url** | String | Link for footer image.
+ + + +**Example:** + +`https://redhat.com` +|=== + +[id="example-config-fields-branding"] +== Example configuration for {productname} branding + +.Branding config.yaml example +[source,yaml] +---- +BRANDING: + logo: https://www.mend.io/wp-content/media/2020/03/5-tips_small.jpg + footer_img: https://www.mend.io/wp-content/media/2020/03/5-tips_small.jpg + footer_url: https://opensourceworld.org/ +---- \ No newline at end of file diff --git a/modules/config-fields-build-logs.adoc b/modules/config-fields-build-logs.adoc index a8c2de7e3..fc7ea522a 100644 --- a/modules/config-fields-build-logs.adoc +++ b/modules/config-fields-build-logs.adoc @@ -1,14 +1,13 @@ [[config-fields-build-logs]] -= Build logs += Build logs configuration fields -.Build logs +.Build logs configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **FEATURE_READER_BUILD_LOGS** | Boolean | If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. + + **Default:** `False` -| {nbsp} | {nbsp} | {nbsp} | **LOG_ARCHIVE_LOCATION** | String | The storage location, defined in DISTRIBUTED_STORAGE_CONFIG, in which to place the archived build logs + + **Example:** `s3_us_east` diff --git a/modules/config-fields-clair-auth.adoc b/modules/config-fields-clair-auth.adoc new file mode 100644 index 000000000..131092576 --- /dev/null +++ b/modules/config-fields-clair-auth.adoc @@ -0,0 +1,18 @@ +:_content-type: CONCEPT +[id="config-fields-clair-auth"] += Clair authorization configuration fields + +The following authorization configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] + +|=== +| Field | Type | Description +| **auth** | Object | Defines Clair's external and intra-service JWT-based authentication. If multiple `auth` mechanisms are defined, Clair picks one. Currently, multiple mechanisms are unsupported. + +| **.psk** | String | Defines pre-shared key authentication. + +| **.psk.key** | String | A shared base64-encoded key distributed between all parties signing and verifying JWTs. + +| **.psk.iss** | String | A list of JWT issuers to verify. An empty list accepts any issuer in a JWT claim. +|=== diff --git a/modules/config-fields-clair-indexer.adoc b/modules/config-fields-clair-indexer.adoc new file mode 100644 index 000000000..08f2f05b1 --- /dev/null +++ b/modules/config-fields-clair-indexer.adoc @@ -0,0 +1,35 @@ +:_content-type: CONCEPT +[id="config-fields-clair-indexer"] += Clair indexer configuration fields + +The following indexer configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **indexer** | Object | Provides Clair indexer node configuration. + +| **.airgap** | Boolean | Disables HTTP access to the internet for indexers and fetchers. Private IPv4 and IPv6 addresses are allowed. Database connections are unaffected. + +| **.connstring** | String | A Postgres connection string. Accepts format as a URL or libpq connection string. + +| **.index_report_request_concurrency** | Integer | Rate limits the number of index report creation requests. Setting this to `0` attempts to auto-size this value. Setting a negative value means unlimited. The auto-sizing is a multiple of the number of available cores. + +The API returns a `429` status code if concurrency is exceeded. + +| **.scanlock_retry** | Integer | A positive integer representing seconds. Concurrent indexers lock on manifest scans to avoid clobbering.
This value tunes how often a waiting indexer polls for the lock. + +| **.layer_scan_concurrency** | Integer | Positive integer limiting the number of concurrent layer scans. Indexers will scan a manifest's layers concurrently. This value tunes the number of layers an indexer scans in parallel. + +| **.migrations** | Boolean | Whether indexer nodes handle migrations to their database. + +| **.scanner** | String | Indexer configuration. + +Scanner allows for passing configuration options to layer scanners. The scanner will have this configuration passed to it on construction if designed to do so. + +| **.scanner.dist** | String | A map with the name of a particular scanner and arbitrary YAML as a value. + +| **.scanner.package** | String | A map with the name of a particular scanner and arbitrary YAML as a value. + +| **.scanner.repo** | String | A map with the name of a particular scanner and arbitrary YAML as a value. +|=== \ No newline at end of file diff --git a/modules/config-fields-clair-matcher.adoc b/modules/config-fields-clair-matcher.adoc new file mode 100644 index 000000000..fd8668d27 --- /dev/null +++ b/modules/config-fields-clair-matcher.adoc @@ -0,0 +1,44 @@ +:_content-type: CONCEPT +[id="config-fields-clair-matcher"] += Clair matcher configuration fields + +The following matcher configuration fields are available for Clair. + +[NOTE] +==== +Differs from `matchers` configuration fields. +==== + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **matcher** | Object | Provides Clair matcher node configuration. + +| **.cache_age** | String | Controls how long clients are hinted to cache responses. + +| **.connstring** | String | A Postgres connection string. Accepts format as a URL or libpq connection string. + +| **.max_conn_pool** | Integer | Limits the database connection pool size. + +Clair allows for a custom connection pool size. This number directly sets how many active database connections are allowed concurrently. + +This parameter will be ignored in a future version. Users should configure this through the connection string. + +| **.indexer_addr** | String | A matcher contacts an indexer to create a `VulnerabilityReport`. The location of this indexer is required. + +| **.migrations** | Boolean | Whether matcher nodes handle migrations to their databases. + +| **.period** | String | Determines how often updates for new security advisories take place. + +Defaults to `30m`. + +| **.disable_updaters** | Boolean | Whether to run background updates or not. + +| **.update_retention** | Integer | Sets the number of update operations to retain between garbage collection cycles. This should be set to a safe maximum value based on database size constraints. + +Defaults to `10`. + +If a value of less than `0` is provided, garbage collection is disabled. `2` is the minimum value to ensure updates can be compared to notifications. +|=== \ No newline at end of file diff --git a/modules/config-fields-clair-matchers.adoc b/modules/config-fields-clair-matchers.adoc new file mode 100644 index 000000000..ab614504b --- /dev/null +++ b/modules/config-fields-clair-matchers.adoc @@ -0,0 +1,32 @@ +:_content-type: CONCEPT +[id="config-fields-clair-matchers"] += Clair matchers configuration fields + +The following matchers configuration fields are available for Clair. + +[NOTE] +==== +Differs from `matcher` configuration fields.
+==== + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **matchers** | Array of strings | Provides configuration for the in-tree `matchers` and `remotematchers`. + +| **.names** | String | A list of string values informing the matcher factory about enabled matchers. If value is set to `null`, the default list of matchers runs: +*alpine*, *aws*, *debian*, *oracle*, *photon*, *python*, *rhel*, *suse*, *ubuntu*, *crda* + +| **.config** | String | Provides configuration to a specific matcher. + +A map keyed by the name of the matcher containing a sub-object which will be provided to the matcher's factory constructor. For example: + +[source,yaml] +---- +config: + python: + ignore_vulns: + - CVE-XYZ + - CVE-ABC +---- +|=== \ No newline at end of file diff --git a/modules/config-fields-clair-metrics.adoc b/modules/config-fields-clair-metrics.adoc new file mode 100644 index 000000000..983fd4cef --- /dev/null +++ b/modules/config-fields-clair-metrics.adoc @@ -0,0 +1,18 @@ +:_content-type: CONCEPT +[id="config-fields-clair-metrics"] += Clair metrics configuration fields + +The following metrics configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] + +|=== +| Field | Type | Description +| **metrics** | Object | Defines the configuration for serving metrics. + +| **.name** | String | The name of the metrics in use. + +| **.prometheus** | String | Configuration for a Prometheus metrics exporter. + +| **.prometheus.endpoint** | String | Defines the path where metrics are served. +|=== \ No newline at end of file diff --git a/modules/config-fields-clair-notifiers.adoc b/modules/config-fields-clair-notifiers.adoc new file mode 100644 index 000000000..eccfd2e6c --- /dev/null +++ b/modules/config-fields-clair-notifiers.adoc @@ -0,0 +1,107 @@ +:_content-type: CONCEPT +[id="config-fields-clair-notifiers"] += Clair notifier configuration fields + +The following notifier configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] + +|=== +| Field | Type | Description +| **notifier** | Object | Provides Clair notifier node configuration. + +| **.connstring** | String | A Postgres connection string. Accepts format as a URL or libpq connection string. + +| **.migrations** | Boolean | Whether notifier nodes handle migrations to their database. + +| **.indexer_addr** | String | A notifier contacts an indexer to create or obtain manifests affected by vulnerabilities. The location of this indexer is required. + +| **.matcher_addr** | String | A notifier contacts a matcher to list update operations and acquire diffs. The location of this matcher is required. + +| **.poll_interval** | String | The frequency at which the notifier will query a matcher for update operations. + +| **.delivery_interval** | String | The frequency at which the notifier attempts delivery of created, or previously failed, notifications. + +| **.disable_summary** | Boolean | Controls whether notifications should be summarized to one per manifest. + +| **.webhook** | Object | Configures the notifier for webhook delivery. + +| **.webhook.target** | String | URL where the webhook will be delivered. + +| **.webhook.callback** | String | The callback URL where notifications can be retrieved. The notification ID will be appended to this URL. + +This will typically be where the Clair notifier is hosted. + +| **.webhook.headers** | String | A map associating a header name with a list of values.
+ +| **.amqp** | Object | Configures the notifier for AMQP delivery. + +[NOTE] +==== +Clair does not declare any AMQP components on its own. All attempts to use an exchange or queue are passive only and will fail. Broker administrators should set up exchanges and queues ahead of time. +==== + +| **.amqp.direct** | Boolean | If `true`, the notifier will deliver individual notifications (not a callback) to the configured AMQP broker. + +| **.amqp.rollup** | Integer | When `amqp.direct` is set to `true`, this value informs the notifier of how many notifications to send in a direct delivery. For example, if `direct` is set to `true`, and `amqp.rollup` is set to `5`, the notifier delivers no more than 5 notifications in a single JSON payload to the broker. Setting the value to `0` effectively sets it to `1`. + +| **.amqp.exchange** | Object | The AMQP exchange to connect to. + +| **.amqp.exchange.name** | String | The name of the exchange to connect to. + +| **.amqp.exchange.type** | String | The type of the exchange. Typically one of the following: *direct*, *fanout*, *topic*, *headers*. + +| **.amqp.exchange.durability** | Boolean | Whether the configured exchange is durable. + +| **.amqp.exchange.auto_delete** | Boolean | Whether the configured queue uses an `auto_delete_policy`. + +| **.amqp.routing_key** | String | The name of the routing key each notification is sent with. + +| **.amqp.callback** | String | If `amqp.direct` is set to `false`, this URL is provided in the notification callback sent to the broker. This URL should point to Clair's notification API endpoint. + +| **.amqp.uris** | String | A list of one or more AMQP brokers to connect to, in priority order. + +| **.amqp.tls** | Object | Configures TLS/SSL connection to an AMQP broker. + +| **.amqp.tls.root_ca** | String | The filesystem path where a root CA can be read. + +| **.amqp.tls.cert** | String | The filesystem path where a TLS/SSL certificate can be read. + +[NOTE] +==== +Clair also respects `SSL_CERT_DIR`, as documented for the Go `crypto/x509` package. +==== + +| **.amqp.tls.key** | String | The filesystem path where a TLS/SSL private key can be read. + +| **.stomp** | Object | Configures the notifier for STOMP delivery. + +| **.stomp.direct** | Boolean | If `true`, the notifier delivers individual notifications (not a callback) to the configured STOMP broker. + +| **.stomp.rollup** | Integer | If `stomp.direct` is set to `true`, this value limits the number of notifications sent in a single direct delivery. For example, if `direct` is set to `true`, and `rollup` is set to `5`, the notifier delivers no more than 5 notifications in a single JSON payload to the broker. Setting the value to `0` effectively sets it to `1`. + +| **.stomp.callback** | String | If `stomp.direct` is set to `false`, this URL is provided in the notification callback sent to the broker. This URL should point to Clair's notification API endpoint. + +| **.stomp.destination** | String | The STOMP destination to deliver notifications to. + +| **.stomp.uris** | String | A list of one or more STOMP brokers to connect to, in priority order. + +| **.stomp.tls** | Object | Configures TLS/SSL connection to the STOMP broker. + +| **.stomp.tls.root_ca** | String | The filesystem path where a root CA can be read. + +[NOTE] +==== +Clair also respects `SSL_CERT_DIR`, as documented for the Go `crypto/x509` package. +==== + +| **.stomp.tls.cert** | String | The filesystem path where a TLS/SSL certificate can be read.
+ +| **.stomp.tls.key** | String | The filesystem path where a TLS/SSL private key can be read. + +| **.stomp.user** | String | Configures login details for the STOMP broker. + +| **.stomp.user.login** | String | The STOMP login to connect with. + +| **.stomp.user.passcode** | String | The STOMP passcode to connect with. +|=== \ No newline at end of file diff --git a/modules/config-fields-clair-trace.adoc b/modules/config-fields-clair-trace.adoc new file mode 100644 index 000000000..499eb7e74 --- /dev/null +++ b/modules/config-fields-clair-trace.adoc @@ -0,0 +1,36 @@ +:_content-type: CONCEPT +[id="config-fields-clair-trace"] += Clair trace configuration fields + +The following trace configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] + +|=== +| Field | Type | Description +| **trace** | Object | Defines distributed tracing configuration based on OpenTelemetry. + +| **.name** | String | The name of the application traces will belong to. + +| **.probability** | Integer | The probability that a trace will occur. + +| **.jaeger** | Object | Defines values for Jaeger tracing. + +| **.jaeger.agent** | Object | Defines values for configuring delivery to a Jaeger agent. + +| **.jaeger.agent.endpoint** | String | An address in the `host:port` syntax where traces can be submitted. + +| **.jaeger.collector** | Object | Defines values for configuring delivery to a Jaeger collector. + +| **.jaeger.collector.endpoint** | String | An address in the `host:port` syntax where traces can be submitted. + +| **.jaeger.collector.username** | String | A Jaeger username. + +| **.jaeger.collector.password** | String | A Jaeger password. + +| **.jaeger.service_name** | String | The service name registered in Jaeger. + +| **.jaeger.tags** | String | Key-value pairs to provide additional metadata. + +| **.jaeger.buffer_max** | Integer | The maximum number of spans that can be buffered in memory before they are sent to the Jaeger backend for storage and analysis. +|=== \ No newline at end of file diff --git a/modules/config-fields-clair-updaters.adoc b/modules/config-fields-clair-updaters.adoc new file mode 100644 index 000000000..fcd0e6d8c --- /dev/null +++ b/modules/config-fields-clair-updaters.adoc @@ -0,0 +1,31 @@ +:_content-type: CONCEPT +[id="config-fields-clair-updaters"] += Clair updaters configuration fields + +The following updaters configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] + +|=== +| Field | Type | Description +| **updaters** | Object | Provides configuration for the matcher's update manager. + +| **.sets** | String | A list of values informing the update manager which updaters to run. + +If value is set to `null`, the default set of updaters runs the following: *alpine*, *aws*, *debian*, *oracle*, *photon*, *pyupio*, *rhel*, *suse*, *ubuntu* + +If left blank, zero updaters run. + +| **.config** | String | Provides configuration to specific updater sets. + +A map keyed by the name of the updater set containing a sub-object which will be provided to the updater set's constructor.
For example: + +[source,yaml] +---- +config: + ubuntu: + security_tracker_url: http://security.url + ignore_distributions: + - cosmic +---- +|=== \ No newline at end of file diff --git a/modules/config-fields-db.adoc b/modules/config-fields-db.adoc index e6537f469..b07932a20 100644 --- a/modules/config-fields-db.adoc +++ b/modules/config-fields-db.adoc @@ -1,71 +1,91 @@ -[[config-fields-db]] +:_content-type: CONCEPT +[id="config-fields-db"] = Database configuration -You configure the connection to the database using the required DB_URI field and optional connection arguments in the DB_CONNECTION_ARGS structure. Some key-value pairs defined under DB_CONNECTION_ARGS are generic while others are database-specific. In particular, SSL configuration depends on the database you are deploying, and examples for PostgreSQL and MySQL are given below. +This section describes the database configuration fields available for {productname} deployments. +[id="database-uri"] == Database URI +With {productname}, connection to the database is configured by using the required `DB_URI` field. + +The following table describes the `DB_URI` configuration field: + .Database URI [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **DB_URI** + -(Required) | String | The URI for accessing the database, including any credentials -|=== -**Example:** -``` -postgresql://quayuser:quaypass@quay-server.example.com:5432/quay -``` +(Required) | String | The URI for accessing the database, including any credentials. + +Example `DB_URI` field: + +*postgresql://quayuser:quaypass@quay-server.example.com:5432/quay* +|=== +[id="database-connection-arguments"] == Database connection arguments +Optional connection arguments are configured by the `DB_CONNECTION_ARGS` parameter. Some of the key-value pairs defined under `DB_CONNECTION_ARGS` are generic, while others are database-specific. + +The following table describes database connection arguments: + .Database connection arguments [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **DB_CONNECTION_ARGS** | Object | Optional connection arguments for the database, such as timeouts and SSL -| {nbsp}{nbsp}{nbsp}**.autorollback**| Boolean | Whether to use thread-local connections + - {nbsp} + -Should *ALWAYS* be `true` -| {nbsp}{nbsp}{nbsp}**.threadlocals**| Boolean | Whether to use auto-rollback connections + - {nbsp} + -Should *ALWAYS* be `true` +| **DB_CONNECTION_ARGS** | Object | Optional connection arguments for the database, such as timeouts and SSL. +| **.autorollback**| Boolean | Whether to use auto-rollback connections. + +Should always be `true`. +| **.threadlocals**| Boolean | Whether to use thread-local connections. + +Should always be `true`. // TODO 36 max_connections, timeout, stale_timeout // | {nbsp}{nbsp}{nbsp}.max_connections| Number | // | {nbsp}{nbsp}{nbsp}.timeout | Number | // | {nbsp}{nbsp}{nbsp}.stale_timeout | Number | |=== +[id="config-fields-postgres"] === PostgreSQL SSL connection arguments -A sample PostgreSQL SSL configuration is given below: +SSL configuration depends on the database you are deploying. The following example shows a PostgreSQL SSL configuration: +[source,yaml] ---- DB_CONNECTION_ARGS: sslmode: verify-ca sslrootcert: /path/to/cacert ---- -The `sslmode` option determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
There are six modes: +The `sslmode` option determines whether, or with what priority, a secure SSL TCP/IP connection will be negotiated with the server. There are six modes: + +.SSL options +[options="header"] +|=== +|Mode |Description + +| **disable** | Your configuration only tries non-SSL connections. +| **allow** | Your configuration first tries a non-SSL connection. Upon failure, tries an SSL connection. +| **prefer** + +(Default) | Your configuration first tries an SSL connection. Upon failure, tries a non-SSL connection. +| **require** | Your configuration only tries an SSL connection. If a root CA file is present, it verifies the certificate in the same way as if `verify-ca` was specified. +| **verify-ca** | Your configuration only tries an SSL connection, and verifies that the server certificate is issued by a trusted certificate authority (CA). +| **verify-full** | Your configuration only tries an SSL connection, and verifies that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate. +|=== -* **disable:** only try a non-SSL connection -* **allow:** first try a non-SSL connection; if that fails, try an SSL connection -* **prefer:** (default) first try an SSL connection; if that fails, try a non-SSL connection -* **require:** only try an SSL connection. If a root CA file is present, verify the certificate in the same way as if verify-ca was specified -* **verify-ca:** only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA) -* **verify-full:** only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate +For more information on the valid arguments for PostgreSQL, see link:https://www.postgresql.org/docs/current/libpq-connect.html[Database Connection Control Functions]. -More information on the valid arguments for PostgreSQL is available at link:https://www.postgresql.org/docs/current/libpq-connect.html[]. +[id="mysql-ssl-connection-arguments"] === MySQL SSL connection arguments -A sample MySQL SSL configuration follows: +The following example shows a sample MySQL SSL configuration: +[source,yaml] ---- DB_CONNECTION_ARGS: ssl: ca: /path/to/cacert ---- -Information on the valid connection arguments for MySQL is available at link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[]. +Information on the valid connection arguments for MySQL is available at link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[Connecting to the Server Using URI-Like Strings or Key-Value Pairs].
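Taken together, a minimal sketch of the database section of `config.yaml` might look like the following. It only combines the `DB_URI` and `DB_CONNECTION_ARGS` fields documented above; the credentials, hostname, and certificate path are example values:

[source,yaml]
----
DB_URI: postgresql://quayuser:quaypass@quay-server.example.com:5432/quay
DB_CONNECTION_ARGS:
  autorollback: true           # should always be true
  threadlocals: true           # should always be true
  sslmode: verify-ca           # PostgreSQL-specific; see the SSL options table above
  sslrootcert: /path/to/cacert # example path to the CA certificate
----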
diff --git a/modules/config-fields-features-misc.adoc b/modules/config-fields-features-misc.adoc index 8df3f8b2d..02ee92735 100644 --- a/modules/config-fields-features-misc.adoc +++ b/modules/config-fields-features-misc.adoc @@ -48,9 +48,7 @@ | **FEATURE_REQUIRE_TEAM_INVITE** | Boolean | Whether to require invitations when adding a user to a team + + **Default:** True -| **FEATURE_RESTRICTED_V1_PUSH** | Boolean | If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push + - + -**Default:** True + |=== diff --git a/modules/config-fields-general-clair.adoc b/modules/config-fields-general-clair.adoc new file mode 100644 index 000000000..24e2ccafd --- /dev/null +++ b/modules/config-fields-general-clair.adoc @@ -0,0 +1,21 @@ +:_content-type: CONCEPT +[id="config-fields-required-clair"] += Clair general fields + +The following section describes the general configuration fields available for a Clair deployment: + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **http_listen_addr** | String | Configures where the HTTP API is exposed. + +Default: `:6060` + +| **introspection_addr** | String | Configures where Clair's metrics and health endpoints are exposed. + +| **log_level** | String | Sets the logging level. Requires one of the following strings: *debug-color*, *debug*, *info*, *warn*, *error*, *fatal*, *panic* + +| **tls** | String | A map containing the configuration for serving the HTTP API over TLS/SSL and HTTP/2. + +| **.cert** | String | The TLS certificate to be used. Must be a full-chain certificate. +|=== diff --git a/modules/config-fields-helm-oci.adoc b/modules/config-fields-helm-oci.adoc index 8de697a0d..189d0df21 100644 --- a/modules/config-fields-helm-oci.adoc +++ b/modules/config-fields-helm-oci.adoc @@ -1,5 +1,5 @@ [[config-fields-helm-oci]] -= OCI and Helm configuration += OCI and Helm configuration fields Helm is now supported under the `FEATURE_GENERAL_OCI_SUPPORT` property. If you need to explicitly enable the feature, for example, if it has previously been disabled or if you have upgraded from a version where it is not enabled by default, you need to add two properties to the Quay configuration to enable the use of OCI artifacts: @@ -10,7 +10,7 @@ FEATURE_HELM_OCI_SUPPORT: true ---- -.OCI and Helm configuration +.OCI and Helm configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description @@ -24,5 +24,5 @@ FEATURE_HELM_OCI_SUPPORT: true [IMPORTANT] ==== -As of {productname} {producty}, `FEATURE_HELM_OCI_SUPPORT` has been deprecated and will be removed in a future version of {productname}. In {productname} {producty}, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support. +As of {productname} 3.6, `FEATURE_HELM_OCI_SUPPORT` has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support.
==== diff --git a/modules/config-fields-intro.adoc b/modules/config-fields-intro.adoc index a54fd004c..e4afb4fea 100644 --- a/modules/config-fields-intro.adoc +++ b/modules/config-fields-intro.adoc @@ -1,2 +1,5 @@ -[[config-fields-intro]] +:_content-type: CONCEPT +[id="config-fields-intro"] = Configuration fields + +This section describes both the required and optional configuration fields when deploying {productname}. \ No newline at end of file diff --git a/modules/config-fields-ipv6.adoc b/modules/config-fields-ipv6.adoc new file mode 100644 index 000000000..f395f913b --- /dev/null +++ b/modules/config-fields-ipv6.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="config-fields-ipv6"] += IPv6 configuration field + +.IPv6 configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **FEATURE_LISTEN_IP_VERSION** | String | Enables IPv4, IPv6, or dual-stack protocol family. This configuration field must be properly set; otherwise, {productname} fails to start. + +*Default:* `IPv4` + +*Additional configurations:* `IPv6`, `dual-stack` +|=== \ No newline at end of file diff --git a/modules/config-fields-jwt.adoc b/modules/config-fields-jwt.adoc index ab593e61b..b744049ab 100644 --- a/modules/config-fields-jwt.adoc +++ b/modules/config-fields-jwt.adoc @@ -1,8 +1,8 @@ [[config-fields-jwt]] -= JTW configuration += JWT configuration fields -.JWT configuration +.JWT configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description diff --git a/modules/config-fields-ldap.adoc b/modules/config-fields-ldap.adoc index fa52a20be..7b6fd0404 100644 --- a/modules/config-fields-ldap.adoc +++ b/modules/config-fields-ldap.adoc @@ -1,8 +1,6 @@ [[config-fields-ldap]] = LDAP configuration fields - - .LDAP configuration [cols="3a,1a,2a",options="header"] |=== @@ -15,7 +13,6 @@ | **FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP** | Boolean | If enabled, non-superusers can set up syncing on teams using LDAP + + **Default:** `false` -| {nbsp} | {nbsp} | {nbsp} | **LDAP_ADMIN_DN** | String | The admin DN for LDAP authentication. | **LDAP_ADMIN_PASSWD** | String | The admin password for LDAP authentication. | **LDAP_ALLOW_INSECURE_FALLBACK** | Boolean | Whether or not to allow SSL insecure fallback for LDAP authentication. @@ -33,15 +30,32 @@ `2h` + **Default:** + `30m` + +| **LDAP_SUPERUSER_FILTER** | String | Subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators to configure Lightweight Directory Access Protocol (LDAP) users as superusers when {productname} uses LDAP as its authentication provider. + +With this field, administrators can add or remove superusers without having to update the {productname} configuration file and restart their deployment. + +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. + +| **LDAP_RESTRICTED_USER_FILTER** | String | Subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. + +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. + +|=== -== LDAP configuration example +[id="ldap-config-field-reference"] +== LDAP configuration field references + +Use the following references to update your `config.yaml` file with the desired configuration field.
+ +[id="reference-ldap-user"] +=== Basic LDAP user configuration -.$QUAY/config/config.yaml [source,yaml] ---- +--- AUTHENTICATION_TYPE: LDAP -... +--- LDAP_ADMIN_DN: uid=testuser,ou=Users,o=orgid,dc=jumpexamplecloud,dc=com LDAP_ADMIN_PASSWD: samplepassword LDAP_ALLOW_INSECURE_FALLBACK: false @@ -54,4 +68,59 @@ LDAP_UID_ATTR: uid LDAP_URI: ldap://ldap.example.com:389 LDAP_USER_RDN: - ou=Users +---- + +[id="reference-ldap-restricted-user"] +=== LDAP restricted user configuration + +[source,yaml] +---- +--- +AUTHENTICATION_TYPE: LDAP +--- +LDAP_ADMIN_DN: uid=<name>,ou=Users,o=<organization_id>,dc=<example_domain>,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o=<organization_id> + - dc=<example_domain> + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://<example_url>.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=<organization_id>,dc=<example_domain>,dc=com) +LDAP_RESTRICTED_USER_FILTER: (<filterField>=<value>) +LDAP_USER_RDN: + - ou=<example_organization_unit> + - o=<organization_id> + - dc=<example_domain> + - dc=com +--- +---- + +[id="reference-ldap-super-user"] +=== LDAP superuser configuration + +[source,yaml] +---- +--- +AUTHENTICATION_TYPE: LDAP +--- +LDAP_ADMIN_DN: uid=<name>,ou=Users,o=<organization_id>,dc=<example_domain>,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o=<organization_id> + - dc=<example_domain> + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://<example_url>.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=<organization_id>,dc=<example_domain>,dc=com) +LDAP_SUPERUSER_FILTER: (<filterField>=<value>) +LDAP_USER_RDN: + - ou=<example_organization_unit> + - o=<organization_id> + - dc=<example_domain> + - dc=com ---- \ No newline at end of file diff --git a/modules/config-fields-legacy.adoc b/modules/config-fields-legacy.adoc index e566924f7..cdbe0a491 100644 --- a/modules/config-fields-legacy.adoc +++ b/modules/config-fields-legacy.adoc @@ -3,7 +3,7 @@ Some fields are deprecated or obsolete: -.Legacy fields +.Legacy configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description @@ -17,4 +17,13 @@ Some fields are deprecated or obsolete: **Default:** `<1.6.0` | **DOCUMENTATION_ROOT** | String | Root URL for documentation links | **SECURITY_SCANNER_V4_NAMESPACE_WHITELIST** | String | The namespaces for which the security scanner should be enabled + +| **FEATURE_RESTRICTED_V1_PUSH** | Boolean | If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push + + + +**Default:** True + +| **V1_PUSH_WHITELIST** | Array of String | The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true + + + |=== diff --git a/modules/config-fields-mail.adoc b/modules/config-fields-mail.adoc index 930d4808f..61089bc28 100644 --- a/modules/config-fields-mail.adoc +++ b/modules/config-fields-mail.adoc @@ -1,15 +1,14 @@ [[config-fields-mail]] -= Mail configuration += Mail configuration fields -.Mail fields +.Mail configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **FEATURE_MAILING** | Boolean | Whether emails are enabled + + **Default:** `False` -| {nbsp} | {nbsp} | {nbsp} | **MAIL_DEFAULT_SENDER** | String | If specified, the e-mail address used as the `from` when {productname} sends e-mails.
If none, defaults to `support@quay.io` + + **Example:** `support@example.com` diff --git a/modules/config-fields-mirroring.adoc b/modules/config-fields-mirroring.adoc index 4485a2401..af03d8dfe 100644 --- a/modules/config-fields-mirroring.adoc +++ b/modules/config-fields-mirroring.adoc @@ -7,8 +7,7 @@ | Field | Type | Description | **FEATURE_REPO_MIRROR** | Boolean | Enable or disable repository mirroring + + - **Default:** `false` -| {nbsp} | {nbsp} | {nbsp} + **Default:** `false` | **REPO_MIRROR_INTERVAL** | Number | The number of seconds between checking for repository mirror candidates + + **Default:** 30 @@ -21,4 +20,9 @@ | **REPO_MIRROR_TLS_VERIFY** | Boolean | Require HTTPS and verify certificates of the Quay registry during mirroring. + + **Default:** `false` + +|**REPO_MIRROR_ROLLBACK** | Boolean | When set to `true`, the repository rolls back after a failed mirror attempt. + +*Default*: `false` + |=== \ No newline at end of file diff --git a/modules/config-fields-misc.adoc b/modules/config-fields-misc.adoc index 2ee4431fe..8debc1b14 100644 --- a/modules/config-fields-misc.adoc +++ b/modules/config-fields-misc.adoc @@ -1,8 +1,8 @@ [[config-fields-misc]] -= Miscellaneous fields += Miscellaneous configuration fields -.Miscellaneous fields +.Miscellaneous configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description @@ -39,8 +39,9 @@ | **SEARCH_RESULTS_PER_PAGE** | Number | Number of results returned per page by search page + + **Default:** 10 -| **V1_PUSH_WHITELIST** | Array of String | The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true -| **V2_PAGINATION_SIZE** | Number | The number of results returned per page in V2 registry APIs +| **V2_PAGINATION_SIZE** | Number | The number of results returned per page in V2 registry APIs + + + + **Default:** 50 | **WEBHOOK_HOSTNAME_BLACKLIST** | Array of String | The set of hostnames to disallow from webhooks when validating, beyond localhost | **CREATE_PRIVATE_REPO_ON_PUSH** | Boolean | Whether new repositories created by push are set to private visibility + + @@ -49,4 +50,203 @@ + **Default:** False | **NON_RATE_LIMITED_NAMESPACES** | Array of String | If rate limiting has been enabled using `FEATURE_RATE_LIMITS`, you can override it for specific namespaces that require unlimited access. + +| xref:reference-miscellaneous-v2-ui[**FEATURE_UI_V2**] | Boolean | When set, allows users to try the beta UI environment. + +*Default:* `True` |=== + +//Consider removing in 3.9 + +[id="miscellaneous-config-field-reference"] +== Miscellaneous configuration field references + +Use the following references to update your `config.yaml` file with the desired configuration field. + +[id="reference-miscellaneous-v2-ui"] +=== v2 user interface configuration + +With `FEATURE_UI_V2` enabled, you can toggle between the current version of the user interface and the new version of the user interface. + +[IMPORTANT] +==== +* This UI is currently in beta and subject to change. In its current state, users can only create, view, and delete organizations, repositories, and image tags. +* When running {productname} in the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI.
+* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. In the new UI, {productname} uses the standard definition of megabyte (MB) to report image manifest sizes. +==== + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +FEATURE_TEAM_SYNCING: false +FEATURE_UI_V2: true +FEATURE_USER_CREATION: true +--- +---- + +. Log in to your {productname} deployment. + +. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to *New UI*, and then click *Use Beta Environment*, for example: ++ +image:38-ui-toggle.png[{productname} 3.8 UI toggle] + +[id="creating-new-organization-v2-ui"] +==== Creating a new organization in the {productname} 3.8 beta UI + +.Prerequisites + +* You have toggled your {productname} deployment to use the 3.8 beta UI. + +Use the following procedure to create an organization using the {productname} 3.8 beta UI. + +.Procedure + +. Click *Organization* in the navigation pane. + +. Click *Create Organization*. + +. Enter an *Organization Name*, for example, `testorg`. + +. Click *Create*. + +Now, your example organization should appear on the *Organizations* page. + +[id="deleting-organization-v2"] +==== Deleting an organization using the {productname} 3.8 beta UI + +Use the following procedure to delete an organization using the {productname} 3.8 beta UI. + +.Procedure + +. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +On the *Delete* page, there is a *Search* input box. With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm that the organization is marked for deletion. +==== + +. Confirm that you want to permanently delete the organization by typing *confirm* in the box. + +. Click *Delete*. + +After deletion, you are returned to the *Organizations* page. + +[NOTE] +==== +You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*. +==== + +[id="creating-new-repository-v2"] +==== Creating a new repository using the {productname} 3.8 beta UI + +Use the following procedure to create a repository using the {productname} 3.8 beta UI. + +.Procedure + +. Click *Repositories* on the navigation pane. + +. Click *Create Repository*. + +. Select a namespace, for example, *quayadmin*, and then enter a *Repository name*, for example, `testrepo`. + +. Click *Create*. + +Now, your example repository should appear on the *Repositories* page. + +[id="deleting-repository-v2"] +==== Deleting a repository using the {productname} 3.8 beta UI + +.Prerequisites + +* You have created a repository. + +.Procedure + +. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +.
Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. + +[id="pushing-image-v2"] +==== Pushing an image to the {productname} 3.8 beta UI + +Use the following procedure to push an image to the {productname} 3.8 beta UI. + +.Procedure + +. Pull a sample image from an external registry: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your {productname} registry: ++ +[source,terminal] +---- +$ podman push quay-server.example.com/quayadmin/busybox:test +---- + +. Navigate to the *Repositories* page on the {productname} UI and ensure that your image has been properly pushed. + +. You can check the security details by selecting your image tag, and then navigating to the *Security Report* page. + +[id="deleting-image-v2"] +==== Deleting an image using the {productname} 3.8 beta UI + +Use the following procedure to delete an image using the {productname} 3.8 beta UI. + +.Prerequisites + +* You have pushed an image to your {productname} registry. + +.Procedure + +. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. + +[id="enabling-legacy-ui"] +==== Enabling the {productname} legacy UI + +. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to *Current UI*. ++ +image:38-ui-toggle.png[{productname} 3.8 UI toggle] + diff --git a/modules/config-fields-modelcache-clustered-redis.adoc b/modules/config-fields-modelcache-clustered-redis.adoc new file mode 100644 index 000000000..eb2c5fdb5 --- /dev/null +++ b/modules/config-fields-modelcache-clustered-redis.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache-clustered-redis"] += Clustered Redis configuration option + +Use the following configuration for a clustered Redis instance: + +[source,yaml] +---- + DATA_MODEL_CACHE_CONFIG: + engine: rediscluster + redis_config: + startup_nodes: + - host: <host> + port: <port> + password: <password> + read_from_replicas: <true|false> + skip_full_coverage_check: <true|false> + ssl: <true|false> +---- \ No newline at end of file diff --git a/modules/config-fields-modelcache-memcache.adoc b/modules/config-fields-modelcache-memcache.adoc new file mode 100644 index 000000000..bf2ce6b9d --- /dev/null +++ b/modules/config-fields-modelcache-memcache.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache-memcache"] += Memcache configuration option + +Memcache is the default ModelCache configuration option. With Memcache, no additional configuration is necessary.
diff --git a/modules/config-fields-modelcache-single-redis.adoc b/modules/config-fields-modelcache-single-redis.adoc new file mode 100644 index 000000000..7a255e2e4 --- /dev/null +++ b/modules/config-fields-modelcache-single-redis.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache-single-redis"] += Single Redis configuration option + +The following configuration is for a single Redis instance with optional read-only replicas: + +[source,yaml] +---- + DATA_MODEL_CACHE_CONFIG: + engine: redis + redis_config: + primary: + host: <host> + port: <port> + password: <password> + ssl: <true|false> + replica: + host: <host> + port: <port> + password: <password> + ssl: <true|false> +---- \ No newline at end of file diff --git a/modules/config-fields-modelcache.adoc b/modules/config-fields-modelcache.adoc new file mode 100644 index 000000000..7bc07d254 --- /dev/null +++ b/modules/config-fields-modelcache.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache"] += ModelCache configuration options + +The following options are available on {productname} for configuring ModelCache. \ No newline at end of file diff --git a/modules/config-fields-nested-repositories.adoc b/modules/config-fields-nested-repositories.adoc index 56ee2771e..5f68d2e06 100644 --- a/modules/config-fields-nested-repositories.adoc +++ b/modules/config-fields-nested-repositories.adoc @@ -1,19 +1,19 @@ [[config-fields-nested-repositories]] -= Configuring nested repositories += Nested repositories configuration fields -With {productname} {producty}, support for nested repository path names has been added under the `FEATURE_EXTENDED_REPOSITORY_NAMES` property. This optional configuration must be manually added to the config.yaml by the user to enable support. Enablement allows the use of `/` in repository names. +With {productname} 3.6, support for nested repository path names has been added under the `FEATURE_EXTENDED_REPOSITORY_NAMES` property. This optional configuration is added to the config.yaml by default. Enablement allows the use of `/` in repository names.
 [source,yaml]
 ----
 FEATURE_EXTENDED_REPOSITORY_NAMES: true
 ----
 
-.OCI and nested repositories configuration
+.OCI and nested repositories configuration fields
 [cols="3a,1a,2a",options="header"]
 |===
 | Field | Type | Description
-| **FEATURE_EXTENDED_REPOSITORY_NAMES** | Boolean | Enable support for nested repositories +
- +
-**Default:** False
+| **FEATURE_EXTENDED_REPOSITORY_NAMES** | Boolean | Enable support for nested repositories. +
+ +
+**Default:** True
 |===
diff --git a/modules/config-fields-oauth.adoc b/modules/config-fields-oauth.adoc
index aa1128235..e1c0c96a3 100644
--- a/modules/config-fields-oauth.adoc
+++ b/modules/config-fields-oauth.adoc
@@ -1,6 +1,5 @@
 [[config-fields-oauth]]
-= OAuth configuration
-
+= OAuth configuration fields
 
 .OAuth fields
 [cols="3a,1a,2a",options="header"]
@@ -10,7 +9,7 @@
 |===
 
-== GitHub OAuth
+== GitHub OAuth configuration fields
 
 .GitHub OAuth fields
 [cols="3a,1a,2a",options="header"]
@@ -40,7 +39,7 @@
 |===
 
-== Google OAuth
+== Google OAuth configuration fields
 
 .Google OAuth fields
 [cols="3a,1a,2a",options="header"]
diff --git a/modules/config-fields-optional-intro.adoc b/modules/config-fields-optional-intro.adoc
index ba0f08d73..a1bbb2299 100644
--- a/modules/config-fields-optional-intro.adoc
+++ b/modules/config-fields-optional-intro.adoc
@@ -1,7 +1,8 @@
-[[config-fields-optional-intro]]
+:_content-type: CONCEPT
+[id="config-fields-optional-intro"]
 = Optional configuration fields
 
-Optional fields are covered in the following sections:
+Optional fields for {productname} can be found in the following sections:
 
 * xref:config-fields-basic[Basic configuration]
 * xref:config-fields-ssl[SSL]
@@ -23,4 +24,5 @@ Optional fields are covered in the following sections:
 * xref:config-fields-app-tokens[App tokens]
 * xref:config-fields-misc[Miscellaneous]
 * xref:config-fields-legacy[Legacy options]
-
+* xref:config-fields-v2-ui[User interface v2]
+* xref:config-fields-ipv6[IPv6 configuration field]
\ No newline at end of file
diff --git a/modules/config-fields-overview.adoc b/modules/config-fields-overview.adoc
new file mode 100644
index 000000000..12fb2947a
--- /dev/null
+++ b/modules/config-fields-overview.adoc
@@ -0,0 +1,97 @@
+:_content-type: CONCEPT
+[id="config-fields-overview"]
+= Clair configuration overview
+
+Clair is configured by a structured YAML file. Each Clair node needs to specify what mode it will run in and a path to a configuration file through CLI flags or environment variables. For example:
+
+[source,terminal]
+----
+$ clair -conf ./path/to/config.yaml -mode indexer
+----
+
+or
+
+[source,terminal]
+----
+$ clair -conf ./path/to/config.yaml -mode matcher
+----
+
+Together, these commands start two Clair nodes that use the same configuration file: one runs the indexing facilities, while the other runs the matching facilities.
+
+Environment variables respected by the Go standard library can be specified if needed, for example:
+
+* `HTTP_PROXY`
+* `HTTPS_PROXY`
+* `SSL_CERT_DIR`
+
+If you are running Clair in `combo` mode, you must supply the indexer, matcher, and notifier configuration blocks in the configuration.
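+
+For example, a sketch of a single Clair node that runs all of the facilities in `combo` mode, using the same flags as the commands above:
+
+[source,terminal]
+----
+$ clair -conf ./path/to/config.yaml -mode combo
+----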
+ +[id="config-fields-clair-reference"] +== Clair configuration reference + +The following YAML shows an example Clair configuration: + +[source,yaml] +---- +http_listen_addr: "" +introspection_addr: "" +log_level: "" +tls: {} +indexer: + connstring: "" + scanlock_retry: 0 + layer_scan_concurrency: 0 + migrations: false + scanner: {} + airgap: false +matcher: + connstring: "" + indexer_addr: "" + migrations: false + period: "" + disable_updaters: false + update_retention: 2 +matchers: + names: nil + config: nil +updaters: + sets: nil + config: nil +notifier: + connstring: "" + migrations: false + indexer_addr: "" + matcher_addr: "" + poll_interval: "" + delivery_interval: "" + disable_summary: false + webhook: null + amqp: null + stomp: null +auth: + psk: nil +trace: + name: "" + probability: null + jaeger: + agent: + endpoint: "" + collector: + endpoint: "" + username: null + password: null + service_name: "" + tags: nil + buffer_max: 0 +metrics: + name: "" + prometheus: + endpoint: null + dogstatsd: + url: "" +---- + +[NOTE] +==== +The above YAML file lists every key for completeness. Using this configuration file as-is will result in some options not having their defaults set normally. +==== \ No newline at end of file diff --git a/modules/config-fields-quota.adoc b/modules/config-fields-quota.adoc index 0ca18e83e..d4ddfca72 100644 --- a/modules/config-fields-quota.adoc +++ b/modules/config-fields-quota.adoc @@ -1,9 +1,32 @@ [[config-fields-quota]] = Quota management configuration -Quota management is now supported under the `FEATURE_QUOTA_MANAGEMENT` property. Quota management is turned off by default, so to enable it, set the feature flag in your `config.yaml` to `true`: +Quota management is now supported under the `FEATURE_QUOTA_MANAGEMENT` property and is turned off by default. To enable quota management, set the feature flag in your `config.yaml` to `true`: [source,yaml] ---- FEATURE_QUOTA_MANAGEMENT: true ----- \ No newline at end of file +---- + + +[NOTE] +==== +In {productname} 3.7, superuser privileges are required to create, update and delete quotas. While quotas can be set for users as well as organizations, you cannot reconfigure the _user_ quota using the {productname} UI and you must use the API instead. +==== + + +== Default quota + +To specify a system-wide default storage quota that is applied to every organization and user, use the *DEFAULT_SYSTEM_REJECT_QUOTA_BYTES* configuration flag. + +.Default quota configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** | String | The quota size to apply to all organizations and users. + + + +By default, no limit is set. +|=== + + +If you configure a specific quota for an organization or user, and then delete that quota, the system-wide default quota will apply if one has been set. Similarly, if you have configured a specific quota for an organization or user, and then modify the system-wide default quota, the updated system-wide default will override any specific settings. 
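+
+For example, a minimal sketch that enables quota management and sets an illustrative system-wide default of roughly 100 MB:
+
+[source,yaml]
+----
+FEATURE_QUOTA_MANAGEMENT: true
+DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 104857600
+----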
\ No newline at end of file diff --git a/modules/config-fields-recaptcha.adoc b/modules/config-fields-recaptcha.adoc index 7f86c1f8d..e6ebaf082 100644 --- a/modules/config-fields-recaptcha.adoc +++ b/modules/config-fields-recaptcha.adoc @@ -1,14 +1,13 @@ [[config-fields-recaptcha]] -= Recaptcha configuration += Recaptcha configuration fields -.Recaptcha fields +.Recaptcha configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **FEATURE_RECAPTCHA** | Boolean | Whether Recaptcha is necessary for user login and recovery + + **Default:** False -| {nbsp} | {nbsp} | {nbsp} | **RECAPTCHA_SECRET_KEY** | String | If recaptcha is enabled, the secret key for the Recaptcha service | **RECAPTCHA_SITE_KEY** | String | If recaptcha is enabled, the site key for the Recaptcha service |=== \ No newline at end of file diff --git a/modules/config-fields-redis.adoc b/modules/config-fields-redis.adoc index a93bd7426..84dd1e8e4 100644 --- a/modules/config-fields-redis.adoc +++ b/modules/config-fields-redis.adoc @@ -1,67 +1,105 @@ -[[config-fields-redis]] +:_content-type: CONCEPT +[id="config-fields-redis"] = Redis configuration fields +This section details the configuration fields available for Redis deployments. == Build logs +The following build logs configuration fields are available for Redis deployments: + .Build logs configuration [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **BUILDLOGS_REDIS** + -(Required) | Object | Redis connection details for build logs caching -|{nbsp}{nbsp}{nbsp}**.host** + -{nbsp}{nbsp}{nbsp}(Required)| String | The hostname at which Redis is accessible + - {nbsp} + +(Required) | Object | Redis connection details for build logs caching. +|**.host** + +(Required)| String | The hostname at which Redis is accessible. + **Example:** + -`quay-server.example.com` -|{nbsp}{nbsp}{nbsp}**.port** + -{nbsp}{nbsp}{nbsp}(Required)| Number | The port at which Redis is accessible + - {nbsp} + +`quay-server.example.com` +|**.port** + +(Required)| Number | The port at which Redis is accessible. + **Example:** + `6379` -|{nbsp}{nbsp}{nbsp}**.password** | String | The port at which Redis is accessible + - {nbsp} + +|**.password** | String | The password to connect to the Redis instance. + **Example:** + -`strongpassword` +`strongpassword` +| **.ssl** + +(Optional) | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. |=== - +[id="user-event-fields-redis"] == User events +The following user event fields are available for Redis deployments: + .User events config [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **USER_EVENTS_REDIS** + -(Required) | Object | Redis connection details for user event handling -|{nbsp}{nbsp}{nbsp}**.host** + -{nbsp}{nbsp}{nbsp}(Required)| String | The hostname at which Redis is accessible + - {nbsp} + +(Required) | Object | Redis connection details for user event handling. +|**.host** + +(Required)| String | The hostname at which Redis is accessible. + **Example:** + -`quay-server.example.com` -|{nbsp}{nbsp}{nbsp}**.port** + -{nbsp}{nbsp}{nbsp}(Required)| Number | The port at which Redis is accessible + - {nbsp} + +`quay-server.example.com` +|**.port** + +(Required)| Number | The port at which Redis is accessible. + **Example:** + `6379` -|{nbsp}{nbsp}{nbsp}**.password** | String | The port at which Redis is accessible + - {nbsp} + +|**.password** | String | The password to connect to the Redis instance. 
+ +**Example:** + +`strongpassword` +| **.ssl** | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. +| **.ssl_keyfile** + +(Optional) | String | The name of the key database file, which houses the client certificate to be used. + +**Example:** + +`ssl_keyfile: /path/to/server/privatekey.pem` +| **.ssl_certfile** + +(Optional) | String | Used for specifying the file path of the SSL certificate. + +**Example:** + +`ssl_certfile: /path/to/server/certificate.pem` +| **.ssl_cert_reqs** + +(Optional) | String | Used to specify the level of certificate validation to be performed during the SSL/TLS handshake. + **Example:** + -`strongpassword` +`ssl_cert_reqs: CERT_REQUIRED` +| **.ssl_ca_certs** + +(Optional) | String | Used to specify the path to a file containing a list of trusted Certificate Authority (CA) certificates. + +**Example:** + +`ssl_ca_certs: /path/to/ca_certs.pem` +| **.ssl_ca_data** + +(Optional) | String | Used to specify a string containing the trusted CA certificates in PEM format. + +**Example:** + +`ssl_ca_data: ` +| **.ssl_check_hostname ** + +(Optional) | Boolean | Used when setting up an SSL/TLS connection to a server. It specifies whether the client should check that the hostname in the server's SSL/TLS certificate matches the hostname of the server it is connecting to. + +**Example:** + +`ssl_check_hostname: true` |=== +[id="example-redis-configuration"] +== Example Redis configuration +The following YAML shows a sample configuration using Redis with optional SSL/TLS fields: -== Example redis configuration - -``` +[source,yaml] +---- BUILDLOGS_REDIS: - host: quay-server.example.com - password: strongpassword - port: 6379 + host: quay-server.example.com + password: strongpassword + port: 6379 + ssl: true + USER_EVENTS_REDIS: - host: quay-server.example.com - password: strongpassword - port: 6379 -``` \ No newline at end of file + host: quay-server.example.com + password: strongpassword + port: 6379 + ssl: true + ssl_*: +---- + +[NOTE] +==== +If your deployment uses Azure Cache for Redis and `ssl` is set to `true`, the port defaults to `6380`. +==== diff --git a/modules/config-fields-required-general.adoc b/modules/config-fields-required-general.adoc index a82a0db96..33c7933d9 100644 --- a/modules/config-fields-required-general.adoc +++ b/modules/config-fields-required-general.adoc @@ -1,33 +1,36 @@ -[[config-fields-required-general]] +:_content-type: CONCEPT +[id="config-fields-required-general"] = General required fields +The following table describes the required configuration fields for a {productname} deployment: + .General required fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description | **AUTHENTICATION_TYPE** + -(Required) | String | The authentication engine to use for credential authentication + +(Required) | String | The authentication engine to use for credential authentication. + + **Values:** + One of `Database`, `LDAP`, `JWT`, `Keystone`, `OIDC` + + **Default:** `Database` | **PREFERRED_URL_SCHEME** + -(Required) | String | The URL scheme to use when accessing {productname} + +(Required) | String | The URL scheme to use when accessing {productname}. + + **Values:** + One of `http`, `https` + + **Default:** `http` | **SERVER_HOSTNAME** + -(Required) | String | The URL at which {productname} is accessible, without the scheme + +(Required) | String | The URL at which {productname} is accessible, without the scheme. 
+ + **Example:** + `quay-server.example.com` | **DATABASE_SECRET_KEY** + (Required) | String | Key used to encrypt sensitive fields within the database. This value should never be changed once set, otherwise all reliant fields, for example, repository mirror username and password configurations, are invalidated. | **SECRET_KEY** + -(Required) | String | Key used to encrypt sensitive fields within the database and at run time. his value should never be changed once set, otherwise all reliant fields, for example, encrypted password credentials, are invalidated. +(Required) | String | Key used to encrypt sensitive fields within the database and at run time. This value should never be changed once set, otherwise all reliant fields, for example, encrypted password credentials, are invalidated. | **SETUP_COMPLETE** + (Required) | Boolean | This is an artefact left over from earlier versions of the software and currently it **must** be specified with a value of `true`. |=== diff --git a/modules/config-fields-required-intro.adoc b/modules/config-fields-required-intro.adoc index 89f25d353..11d61efef 100644 --- a/modules/config-fields-required-intro.adoc +++ b/modules/config-fields-required-intro.adoc @@ -1,7 +1,9 @@ -[[config-fields-required-intro]] +:_content-type: CONCEPT +[id="config-fields-required-intro"] + = Required configuration fields -Required fields are covered in the following sections: +The fields required to configure {productname} are covered in the following sections: * xref:config-fields-required-general[General required fields] * xref:config-fields-redis[Storage for images] diff --git a/modules/config-fields-ssl.adoc b/modules/config-fields-ssl.adoc index 2c740e9bf..ccf4a9402 100644 --- a/modules/config-fields-ssl.adoc +++ b/modules/config-fields-ssl.adoc @@ -11,7 +11,7 @@ Users must set their `PREFERRED_URL_SCHEME`to `https` when using a TLS-terminati + **Default:** `http` | **SERVER_HOSTNAME** + -(Required) | String | The URL at which {productname} is accessible, without the scheme + +(Required) | String | The URL at which {productname} is accessible, without the scheme + + **Example:** + `quay-server.example.com` @@ -23,7 +23,7 @@ Users must set their `PREFERRED_URL_SCHEME`to `https` when using a TLS-terminati | **SSL_PROTOCOLS** | Array of String | If specified, nginx is configured to enabled a list of SSL protocols defined in the list. Removing an SSL protocol from the list disables the protocol during {productname} startup. 
 +
 **Example:** +
-`['TLSv1','TLSv1.1','TLSv1.2']`
+`['TLSv1','TLSv1.1','TLSv1.2','TLSv1.3']`
 | **SESSION_COOKIE_SECURE** | Boolean | Whether the `secure` property should be set on session cookies +
 +
 **Default:** +
diff --git a/modules/config-fields-storage-aws.adoc b/modules/config-fields-storage-aws.adoc
index e9b9526a2..370755c8a 100644
--- a/modules/config-fields-storage-aws.adoc
+++ b/modules/config-fields-storage-aws.adoc
@@ -1,6 +1,10 @@
-[[config-fields-storage-aws]]
+:_content-type: CONCEPT
+[id="config-fields-storage-aws"]
 = AWS S3 storage
 
+The following YAML shows a sample configuration using AWS S3 storage:
+
+
 [source,yaml]
 ----
 DISTRIBUTED_STORAGE_CONFIG:
diff --git a/modules/config-fields-storage-azure.adoc b/modules/config-fields-storage-azure.adoc
index 59caca48f..983da1406 100644
--- a/modules/config-fields-storage-azure.adoc
+++ b/modules/config-fields-storage-azure.adoc
@@ -1,15 +1,17 @@
-[[config-fields-storage-azure]]
-= Azure storage
+:_content-type: CONCEPT
+[id="config-fields-storage-azure"]
+= Azure Storage
 
+The following YAML shows a sample configuration using Azure Storage:
 
 [source,yaml]
 ----
-DISTRIBUTED_STORAGE_CONFIG
+DISTRIBUTED_STORAGE_CONFIG:
   azureStorage:
     - AzureStorage
+    - azure_account_name: azure_account_name_here
       azure_container: azure_container_here
       storage_path: /datastorage/registry
-    - azure_account_name: azure_account_name_here
       azure_account_key: azure_account_key_here
       sas_token: some/path/
       endpoint_url: https://[account-name].blob.core.usgovcloudapi.net <1>
@@ -17,4 +19,6 @@
 DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: []
 DISTRIBUTED_STORAGE_PREFERENCE:
     - azureStorage
 ----
-<1> The `endpoint_url` parameter for Azure storage is optional. If left blank, the `endpoint_url` will connect to the normal Azure region.
+<1> The `endpoint_url` parameter for Azure storage is optional and can be used with Microsoft Azure Government (MAG) endpoints. If left blank, the `endpoint_url` will connect to the normal Azure region.
++
+As of {productname} 3.7, you must use the Primary endpoint of your MAG Blob service. Using the Secondary endpoint of your MAG Blob service will result in the following error: `AuthenticationErrorDetail:Cannot find the claimed account when trying to GetProperties for the account whusc8-secondary`.
diff --git a/modules/config-fields-storage-features.adoc b/modules/config-fields-storage-features.adoc
index 97d27a70d..720c146ef 100644
--- a/modules/config-fields-storage-features.adoc
+++ b/modules/config-fields-storage-features.adoc
@@ -1,17 +1,20 @@
-[[config-fields-storage-features]]
+:_content-type: CONCEPT
+[id="config-fields-storage-features"]
 = Image storage features
 
+The following table describes the image storage features for {productname}:
+
 .Storage config features
 [cols="3a,1a,2a",options="header"]
 |===
 | Field | Type | Description
-| **FEATURE_REPO_MIRROR** | Boolean | If set to true, enables repository mirroring +
+| **FEATURE_REPO_MIRROR** | Boolean | If set to true, enables repository mirroring. +
 +
-**Default:** `False`
+**Default:** `false`
-|**FEATURE_PROXY_STORAGE** | Boolean | Whether to proxy all direct download URLs in storage via the registry nginx +
+|**FEATURE_PROXY_STORAGE** | Boolean | Whether to proxy all direct download URLs in storage through NGINX. +
 +
-**Default:** `False`
+**Default:** `false`
-| **FEATURE_STORAGE_REPLICATION** | Boolean | Whether to automatically replicate between storage engines +
+| **FEATURE_STORAGE_REPLICATION** | Boolean | Whether to automatically replicate between storage engines.
+ +
-**Default:** `False`
+**Default:** `false`
 |===
diff --git a/modules/config-fields-storage-fields.adoc b/modules/config-fields-storage-fields.adoc
index 8dbc0aaeb..63ce38b25 100644
--- a/modules/config-fields-storage-fields.adoc
+++ b/modules/config-fields-storage-fields.adoc
@@ -1,29 +1,28 @@
-[[config-fields-storage-fields]]
+:_content-type: CONCEPT
+[id="config-fields-storage-fields"]
 = Image storage configuration fields
 
-You specify a list of all storage engines using the DISTRIBUTED_STORAGE_CONFIG field, and choose you preferred storage engine(s) using the DISTRIBUTED_STORAGE_PREFERENCE field.
-
-The DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS field is used to control which locations will have their images replicated by default.
+The following table describes the image storage configuration fields for {productname}:
 
 .Storage config fields
 [cols="3a,1a,2a",options="header"]
 |===
-| Field | Type | Description
+| Field | Type | Description
 | **DISTRIBUTED_STORAGE_CONFIG** +
-(Required) | Object | Configuration for storage engine(s) to use in Red Hat Quay. Each key represents an unique identifier for a storage engine. The value consists of a tuple of (key, value) forming an object describing the storage engine parameters. +
- +
+(Required) | Object | Configuration for storage engine(s) to use in {productname}. Each key represents a unique identifier for a storage engine. The value consists of a tuple of (key, value) forming an object describing the storage engine parameters. +
+ +
 **Default:** `[]`
 | **DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS** +
-(Required) | Array of string | The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose images should be fully replicated, by default, to all other storage engines.
+(Required) | Array of string | The list of storage engine(s) (by ID in `DISTRIBUTED_STORAGE_CONFIG`) whose images should be fully replicated, by default, to all other storage engines.
 | **DISTRIBUTED_STORAGE_PREFERENCE** +
-(Required) | Array of string | The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to use. A preferred engine means it is first checked for pulling and images are pushed to it. +
- +
+(Required) | Array of string | The preferred storage engine(s) (by ID in `DISTRIBUTED_STORAGE_CONFIG`) to use. A preferred engine means it is first checked for pulling and images are pushed to it. +
+ +
 **Default:** `false`
- | **MAXIMUM_LAYER_SIZE** | String | Maximum allowed size of an image layer +
+ | **MAXIMUM_LAYER_SIZE** | String | Maximum allowed size of an image layer.
+ +
+**Pattern**: `^[0-9]+(G\|M)$` +
+ +
+**Example**: `100G` +
+ +
 **Default:** `20G`
-|===
\ No newline at end of file
+|===
diff --git a/modules/config-fields-storage-gcp.adoc b/modules/config-fields-storage-gcp.adoc
index 80e41707a..ad630bd7e 100644
--- a/modules/config-fields-storage-gcp.adoc
+++ b/modules/config-fields-storage-gcp.adoc
@@ -1,5 +1,8 @@
-[[config-fields-storage-gcp]]
-= Google cloud storage
+:_content-type: CONCEPT
+[id="config-fields-storage-gcp"]
+= Google Cloud Storage
+
+The following YAML shows a sample configuration using Google Cloud Storage:
 
 [source,yaml]
 ----
diff --git a/modules/config-fields-storage-local.adoc b/modules/config-fields-storage-local.adoc
index da3f824b1..12133d16b 100644
--- a/modules/config-fields-storage-local.adoc
+++ b/modules/config-fields-storage-local.adoc
@@ -1,12 +1,15 @@
-[[config-fields-storage-local]]
+:_content-type: CONCEPT
+[id="config-fields-storage-local"]
 = Local storage
 
+The following YAML shows a sample configuration using local storage:
+
 [source,yaml]
 ----
-DISTRIBUTED_STORAGE_CONFIG
+DISTRIBUTED_STORAGE_CONFIG:
   default:
     - LocalStorage
-    - storage_path: /datastorage/registry`
+    - storage_path: /datastorage/registry
 DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: []
 DISTRIBUTED_STORAGE_PREFERENCE:
     - default
diff --git a/modules/config-fields-storage-noobaa.adoc b/modules/config-fields-storage-noobaa.adoc
index 2563b9fb9..0cde8b339 100644
--- a/modules/config-fields-storage-noobaa.adoc
+++ b/modules/config-fields-storage-noobaa.adoc
@@ -1,9 +1,12 @@
-[[config-fields-storage-noobaa]]
+:_content-type: CONCEPT
+[id="config-fields-storage-noobaa"]
 = OCS/NooBaa
 
+The following YAML shows a sample configuration using an OpenShift Container Storage (OCS)/NooBaa instance:
+
 [source,yaml]
 ----
-DISTRIBUTED_STORAGE_CONFIG
+DISTRIBUTED_STORAGE_CONFIG:
   rhocsStorage:
     - RHOCSStorage
     - access_key: access_key_here
diff --git a/modules/config-fields-storage-rados.adoc b/modules/config-fields-storage-rados.adoc
index 09f6bbeeb..f677666c0 100644
--- a/modules/config-fields-storage-rados.adoc
+++ b/modules/config-fields-storage-rados.adoc
@@ -1,9 +1,12 @@
-[[config-fields-storage-rados]]
+:_content-type: CONCEPT
+[id="config-fields-storage-rados"]
 = Ceph / RadosGW Storage / Hitachi HCP
 
+The following YAML shows a sample configuration using Ceph/RadosGW and Hitachi HCP storage:
+
 [source,yaml]
 ----
-DISTRIBUTED_STORAGE_CONFIG
+DISTRIBUTED_STORAGE_CONFIG:
   radosGWStorage:
     - RadosGWStorage
     - access_key: access_key_here
diff --git a/modules/config-fields-storage-swift.adoc b/modules/config-fields-storage-swift.adoc
index 3bfb2f9f8..3edfa9b5e 100644
--- a/modules/config-fields-storage-swift.adoc
+++ b/modules/config-fields-storage-swift.adoc
@@ -1,9 +1,12 @@
-[[config-fields-storage-swift]]
+:_content-type: CONCEPT
+[id="config-fields-storage-swift"]
 = Swift storage
 
+The following YAML shows a sample configuration using Swift storage:
+
 [source,yaml]
 ----
-DISTRIBUTED_STORAGE_CONFIG
+DISTRIBUTED_STORAGE_CONFIG:
   swiftStorage:
     - SwiftStorage
     - swift_user: swift_user_here
diff --git a/modules/config-fields-storage.adoc b/modules/config-fields-storage.adoc
index 675f20a5f..9e5de4456 100644
--- a/modules/config-fields-storage.adoc
+++ b/modules/config-fields-storage.adoc
@@ -1,14 +1,5 @@
-[[config-fields-storage]]
+:_content-type: CONCEPT
+[id="config-fields-storage"]
 = Image storage
-
-
-
-
-
-
-
-
-
-
-
+This section details the image storage features and configuration fields that are available with {productname}.
\ No newline at end of file
diff --git a/modules/config-fields-tag-expiration.adoc b/modules/config-fields-tag-expiration.adoc
index d07616c3a..df5adb9e5 100644
--- a/modules/config-fields-tag-expiration.adoc
+++ b/modules/config-fields-tag-expiration.adoc
@@ -1,18 +1,18 @@
-[[config-fields-tag-expiration]]
-= Tag expiration options
+:_content-type: CONCEPT
+[id="config-fields-tag-expiration"]
+= Tag expiration configuration fields
 
+The following tag expiration configuration fields are available with {productname}:
 
-
-.Tag expiration configuration
+.Tag expiration configuration fields
 [cols="3a,1a,2a",options="header"]
 |===
 | Field | Type | Description
-| **FEATURE_GARBAGE_COLLECTION** | Boolean | Whether garbage collection of repositories is enabled +
+| **FEATURE_GARBAGE_COLLECTION** | Boolean | Whether garbage collection of repositories is enabled. +
 +
 **Default:** True
-| {nbsp} | {nbsp} | {nbsp}
 | **TAG_EXPIRATION_OPTIONS** +
-(Required) | Array of string |The options that users can select for expiration of tags in their namespace (if enabled) +
+(Required) | Array of string | If enabled, the options that users can select for expiration of tags in their namespace. +
 +
 **Pattern:** +
 `^[0-9]+(w\|m\|d\|h\|s)$`
@@ -22,15 +22,18 @@
 **Pattern:** +
 `^[0-9]+(w\|m\|d\|h\|s)$` +
 **Default:** `2w`
-| {nbsp} | {nbsp} | {nbsp}
-| **FEATURE_CHANGE_TAG_EXPIRATION** | Boolean | Whether users and organizations are allowed to change the tag expiration for tags in their namespace +
+| **FEATURE_CHANGE_TAG_EXPIRATION** | Boolean | Whether users and organizations are allowed to change the tag expiration for tags in their namespace. +
 +
 **Default:** True
 |===
 
+[id="example-config-fields-tag-expiration"]
+== Example tag expiration configuration
+
+The following YAML shows a sample tag expiration configuration:
 
-Example:
-```
+[source,yaml]
+----
 DEFAULT_TAG_EXPIRATION: 2w
 TAG_EXPIRATION_OPTIONS:
     - 0s
@@ -38,4 +41,4 @@ TAG_EXPIRATION_OPTIONS:
     - 1w
     - 2w
     - 4w
-```
\ No newline at end of file
+----
\ No newline at end of file
diff --git a/modules/config-fields-user.adoc b/modules/config-fields-user.adoc
index 51e2382be..10872393c 100644
--- a/modules/config-fields-user.adoc
+++ b/modules/config-fields-user.adoc
@@ -1,8 +1,9 @@
-[[config-fields-user]]
+:_content-type: CONCEPT
+[id="config-fields-user"]
 = User configuration fields
 
 
-.User configuration
+.User configuration fields
 [cols="3a,1a,2a",options="header"]
 |===
 | Field | Type | Description
@@ -21,7 +22,7 @@
 | **FEATURE_USER_METADATA** | Boolean | Whether to collect and support user metadata +
 +
 **Default:** `false`
-| **FEATURE_USERNAME_CONFIRMATION** | Boolean | If set to true, users can confirm their generated usernames +
+| **FEATURE_USERNAME_CONFIRMATION** | Boolean | If set to true, users can confirm and modify their initial usernames when logging in via OpenID Connect (OIDC) or a non-database internal authentication provider like LDAP.
+ **Default:** `true` | **FEATURE_USER_RENAME** | Boolean | If set to true, users can rename their own namespace + @@ -30,7 +31,6 @@ | **FEATURE_INVITE_ONLY_USER_CREATION** | Boolean | Whether users being created must be invited by another user + + **Default:** `false` -| {nbsp} | {nbsp} | {nbsp} | **FRESH_LOGIN_TIMEOUT** | String | The time after which a fresh login requires users to re-enter their password + + **Example**: `5m` @@ -44,4 +44,82 @@ + **Pattern**: `^[0-9]+(w\|m\|d\|h\|s)$` + **Default**: `30m` -|=== \ No newline at end of file + +| **FEATURE_SUPERUSERS_FULL_ACCESS** | Boolean | Grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for. + +*Default:* `False` + +| **FEATURE_RESTRICTED_USERS** | Boolean | When set with `RESTRICTED_USERS_WHITELIST`, restricted users cannot create organizations or content in their own namespace. Normal permissions apply for an organization's membership, for example, a restricted user will still have normal permissions in organizations based on the teams that they are members of. + +*Default:* `False` + +| **RESTRICTED_USERS_WHITELIST** | String | When set with `FEATURE_RESTRICTED_USERS: true`, specific users are excluded from the `FEATURE_RESTRICTED_USERS` setting. + +| **GLOBAL_READONLY_SUPER_USERS** | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. + +|=== + +[id="user-config-field-reference"] +== User configuration fields references + +Use the following references to update your `config.yaml` file with the desired configuration field. + +[id="configuring-superusers-full-access"] +=== FEATURE_SUPERUSERS_FULL_ACCESS configuration reference + +[source,yaml] +---- +--- +SUPER_USERS: +- quayadmin +FEATURE_SUPERUSERS_FULL_ACCESS: True +--- +---- + +[id="configuring-global-readonly-super-users"] +=== GLOBAL_READONLY_SUPER_USERS configuration reference + +[source,yaml] +---- +--- +GLOBAL_READONLY_SUPER_USERS: + - user1 +--- +---- + +[id="configuring-feature-restricted-users"] +=== FEATURE_RESTRICTED_USERS configuration reference + +[source,yaml] +---- +--- +AUTHENTICATION_TYPE: Database +--- +--- +FEATURE_RESTRICTED_USERS: true +--- +---- + +[id="configuring-restricted-users-whitelist"] +=== RESTRICTED_USERS_WHITELIST configuration reference + +.Prerequisites + +* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml` file. + +[source,yaml] +---- +--- +AUTHENTICATION_TYPE: Database +--- +--- +FEATURE_RESTRICTED_USERS: true +RESTRICTED_USERS_WHITELIST: + - user1 +--- +---- + +[NOTE] +==== +When this field is set, whitelisted users can create organizations, or read or write content from the repository even if `FEATURE_RESTRICTED_USERS` is set to `true`. Other users, for example, `user2`, `user3`, and `user4` are restricted from creating organizations, reading, or writing content +==== \ No newline at end of file diff --git a/modules/config-fields-v2-ui.adoc b/modules/config-fields-v2-ui.adoc new file mode 100644 index 000000000..30d65e8f6 --- /dev/null +++ b/modules/config-fields-v2-ui.adoc @@ -0,0 +1,12 @@ +:_content-type: CONCEPT +[id="config-fields-v2-ui"] += User interface v2 configuration field + +.User interface v2 configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **FEATURE_UI_V2** | Boolean | When set, allows users to try the beta UI environment. 
+ +*Default:* `False` +|=== \ No newline at end of file diff --git a/modules/config-file-intro.adoc b/modules/config-file-intro.adoc index b806dbc2b..b8f1c4041 100644 --- a/modules/config-file-intro.adoc +++ b/modules/config-file-intro.adoc @@ -1,12 +1,14 @@ +:_content-type: CONCEPT +[id="editing-the-configuration-file"] = Editing the configuration file -Deploying the registry in standalone mode requires a minimal configuration - see section xref:config-file-minimal[]. - -The configuration file is validated on startup of the registry, and any issue will be highlighted in the output: - -It is possible to use the configuration API to validate the configuration, but this requires starting the Quay container in config mode - -For changes to take effect, the registry needs to be restarted. +To deploy a standalone instance of {productname}, you must provide the minimal configuration information. The requirements for a minimal configuration can be found in "{productname} minimal configuration." +After supplying the required fields, you can validate your configuration. If there are any issues, they will be highlighted. +[NOTE] +==== +It is possible to use the configuration API to validate the configuration, but this requires starting the Quay container in configuration mode. For more information, see "Using the configuration tool." +==== +For changes to take effect, the registry must be restarted. \ No newline at end of file diff --git a/modules/config-file-location.adoc b/modules/config-file-location.adoc index 1ed2d6e66..78c2f43a1 100644 --- a/modules/config-file-location.adoc +++ b/modules/config-file-location.adoc @@ -1,12 +1,14 @@ +:_content-type: CONCEPT +[id="config-file-location"] = Location of configuration file in a standalone deployment -For a standalone deployment, the `config.yaml` file must be specified when starting the Quay registry. This file is located in the config volume, so in the following example, the config file is located at `$QUAY/config/config.yaml`: +For standalone deployments of {productname}, the `config.yaml` file must be specified when starting the {productname} registry. This file is located in the configuration volume. For example, the configuration file is located at `$QUAY/config/config.yaml` when deploying {productname} by the following command: [subs="verbatim,attributes"] -.... +---- $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ --name=quay \ -v $QUAY/config:/conf/stack:Z \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} -.... 
+----
diff --git a/modules/config-file-minimal.adoc b/modules/config-file-minimal.adoc
index ac1c2c756..b00653e4b 100644
--- a/modules/config-file-minimal.adoc
+++ b/modules/config-file-minimal.adoc
@@ -1,24 +1,23 @@
-[[config-file-minimal]]
+:_content-type: CONCEPT
+[id="config-file-minimal"]
 = Minimal configuration
 
-For a standalone deployment, configuration options are required for the following features:
+The following configuration options are required for a standalone deployment of {productname}:
 
 * Server hostname
 * HTTP or HTTPS
-* Authentication type, for example, Database or LDAP
+* Authentication type, for example, Database or Lightweight Directory Access Protocol (LDAP)
 * Secret keys for encrypting data
 * Storage for images
 * Database for metadata
 * Redis for build logs and user events
 * Tag expiration options
 
-
+[id="sample-config-file-minimal"]
 == Sample minimal configuration file
 
-A sample minimal config file, using local storage for images, is shown below:
-
+The following example shows a sample minimal configuration file that uses local storage for images:
 
-.$QUAY/config/config.yaml
 [source,yaml]
 ----
 AUTHENTICATION_TYPE: Database
@@ -26,6 +25,7 @@ BUILDLOGS_REDIS:
     host: quay-server.example.com
     password: strongpassword
     port: 6379
+    ssl: false
 DATABASE_SECRET_KEY: 0ce4f796-c295-415b-bf9d-b315114704b8
 DB_URI: postgresql://quayuser:quaypass@quay-server.example.com:5432/quay
 DEFAULT_TAG_EXPIRATION: 2w
@@ -48,35 +48,35 @@ TAG_EXPIRATION_OPTIONS:
     - 4w
 USER_EVENTS_REDIS:
     host: quay-server.example.com
     password: strongpassword
     port: 6379
+    ssl: false
 ----
 
-
 [NOTE]
 ====
-The `SETUP_COMPLETE` field indicates that the configuration has been validated. You should use the config editor tool to validate your configuration before starting the registry.
+The `SETUP_COMPLETE` field indicates that the configuration has been validated. You should use the configuration editor tool to validate your configuration before starting the registry.
 ====
 
-
+[id="config-local-storage"]
 == Local storage
 
-Using local storage for images is only recommended when deploying a registry for proof of concept purposes. In this case, storage is specified on the command line when starting the registry, mapping a local directory `$QUAY/storage` to the `/datastorage` path in the container:
+Using local storage for images is only recommended when deploying a registry for proof of concept purposes.
+
+When configuring local storage, storage is specified on the command line when starting the registry. The following command maps the local directory `$QUAY/storage` to the `/datastorage` path in the container:
 
 [subs="verbatim,attributes"]
-....
+----
 $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \
    --name=quay \
    -v $QUAY/config:/conf/stack:Z \
    -v $QUAY/storage:/datastorage:Z \
    {productrepo}/{quayimage}:{productminv}
-....
-
-
+----
+
+[id="config-cloud-storage"]
 == Cloud storage
 
-Storage configuration is detailed in the section xref:config-fields-storage[Image storage]. It is useful to compare the difference when using cloud storage, for example, on Google Cloud Platform:
+Storage configuration is detailed in the xref:config-fields-storage[Image storage] section. For some users, it might be useful to compare the difference between Google Cloud Platform and local storage configurations.
For example, the following YAML presents a Google Cloud Platform storage configuration:
 
 .$QUAY/config/config.yaml
 [source,yaml]
@@ -93,15 +93,13 @@ DISTRIBUTED_STORAGE_PREFERENCE:
     - default
 ----
 
-
-When starting the registry using cloud storage, no configuration is required on the command line:
-
+When starting the registry using cloud storage, no configuration is required on the command line. For example:
 
 [subs="verbatim,attributes"]
-....
+----
 $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \
    --name=quay \
    -v $QUAY/config:/conf/stack:Z \
    {productrepo}/{quayimage}:{productminv}
-....
+----
diff --git a/modules/config-intro.adoc b/modules/config-intro.adoc
index a69f97a2d..ebeba98b3 100644
--- a/modules/config-intro.adoc
+++ b/modules/config-intro.adoc
@@ -1,16 +1,17 @@
-[[config-intro]]
-= Getting started with configuration
+:_content-type: CONCEPT
+[id="config-intro"]
+= Getting started with {productname} configuration
 
-{productname} can be deployed in a standalone manner, or on an existing OpenShift cluster using the Operator.
-The methods you use to create, retrieve, update and validate the {productname} configuration vary slightly, depending on the type of deployment you are using. However, the core configuration options are fundamentally the same for all types of deployment, and these options can be manipulated:
+{productname} can be deployed as a standalone registry, or on {ocp} by using the {productname} Operator.
 
-* Directly, by editing the `config.yaml` file. See the section xref:Editing the configuration file[Editing the configuration file].
-* Programmatically, using the configuration API. See the section xref:Using the configuration API[Using the configuration API].
-* Visually, using the configuration tool UI. See the section xref:Using the configuration tool[Using the configuration tool].
+How you create, retrieve, update, and validate the {productname} configuration varies depending on the type of deployment you are using. However, the core configuration options are the same for either deployment type. Core configuration can be set by using one of the following options:
 
+* Directly, by editing the `config.yaml` file. See "Editing the configuration file" for more information.
+* Programmatically, by using the configuration API. See "Using the configuration API" for more information.
+* Visually, by using the configuration tool UI. See "Using the configuration tool" for more information.
 
-You can install Quay on OpenShift using the Operator, without the need to supply any initial configuration, as the Operator will supply sensible defaults to deploy the registry. For a standalone deployment, however, you must supply a minimal level of configuration before the registry can be started. The minimal requirements can be determined using the xref:Retrieving the current configuration[configuration API] and are documented in the section
-
-Once you have Quay deployed with your initial configuration, you should retrieve and save the full configuration from the running system as it may contain extra, generated values that you will need in future when restarting or upgrading your system.
+For standalone deployments of {productname}, you must supply the minimum required configuration parameters before the registry can be started. The minimum requirements to start a {productname} registry can be found in the "Minimal configuration" section.
+If you install {productname} on {ocp} by using the {productname} Operator, you do not need to supply configuration parameters, because the {productname} Operator supplies default information to deploy the registry.
+
+After you have deployed {productname} with the desired configuration, you should retrieve and save the full configuration from your deployment. The full configuration contains additional generated values that you might need when restarting or upgrading your system.
diff --git a/modules/config-preconfigure-automation-intro.adoc b/modules/config-preconfigure-automation-intro.adoc
index 9f93def6d..662dad123 100644
--- a/modules/config-preconfigure-automation-intro.adoc
+++ b/modules/config-preconfigure-automation-intro.adoc
@@ -1,5 +1,8 @@
-[[config-preconfigure-automation-intro]]
+:_content-type: CONCEPT
+[id="config-preconfigure-automation-intro"]
 = Automation options
 
-* xref:config-preconfigure-automation[Pre-configuring Quay for automation]
-// * xref:api-first-user[Using the API to create the first user]
+The following sections describe the available automation options for {productname} deployments:
+
+* xref:config-preconfigure-automation[Pre-configuring {productname} for automation]
+* xref:using-the-api-to-create-first-user[Using the API to create the first user]
diff --git a/modules/config-preconfigure-automation.adoc b/modules/config-preconfigure-automation.adoc
index 13c5dd0c7..205dc71d5 100644
--- a/modules/config-preconfigure-automation.adoc
+++ b/modules/config-preconfigure-automation.adoc
@@ -1,35 +1,41 @@
-[[config-preconfigure-automation]]
-= Pre-configuring Quay for automation
+:_content-type: CONCEPT
+[id="config-preconfigure-automation"]
+= Pre-configuring {productname} for automation
 
-Quay has a number of configuration options that support automation. These options can be set before deployment, to minimize the need to interact with the user interface.
+{productname} has several configuration options that support automation. These options can be set before deployment to minimize the need to interact with the user interface.
 
+[id="allowing-the-api-to-create-first-user"]
 == Allowing the API to create the first user
 
-Set the config option `FEATURE_USER_INITIALIZE` to `true`, so that you can use the API `/api/v1/user/initialize` to create the first user. This API endpoint does not require authentication, unlike all other registry API calls which require an OAuth token which is generated by an OAuth application in an existing organization.
+To create the first user by using the `/api/v1/user/initialize` API, set the `FEATURE_USER_INITIALIZE` parameter to `true`. Unlike all other registry API calls, which require an OAuth token generated by an OAuth application in an existing organization, this API endpoint does not require authentication.
 
-Once you have deployed Quay, you can use the API to create a user, for example, `quayadmin`, provided no other users have already been created. For more information, see the section on
-
-// xref:api-first-user[Creating the first user using the API]
+After you have deployed {productname}, you can use the API to create a user, for example, `quayadmin`, assuming that no other users have already been created. For more information, see xref:using-the-api-to-create-first-user[Using the API to create the first user].
 
+[id="enabling-general-api-access"]
 == Enabling general API access
 
-Set the config option `BROWSER_API_CALLS_XHR_ONLY` to `false`, to allow general access to the Quay registry API.
- -== Adding a super user - -While you cannot create a user until after deployment, it is convenient to ensure that first user is an administrator with full permissions. It is easier to configure this in advance, using the `SUPER_USER` configuration object. - -== Restricting user creation +Set the config option `BROWSER_API_CALLS_XHR_ONLY` to `false` to allow general access to the {productname} registry API. -Once you have configured a super user, you can restrict the ability to create new users to the super user group. Set the `FEATURE_USER_CREATION` to `false` to restrict user creation. +[id="adding-super-user"] +== Adding a superuser +After deploying {productname}, you can create a user. It is suggested that the first user be given administrator privileges with full permissions. Full permissions can be configured in advance by using the `SUPER_USER` configuration object. For example: +[source,yaml] +---- +... +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +SUPER_USERS: + - quayadmin +... +---- -== Suggested configuration for automation +[id="restricting-user-creation"] +== Restricting user creation -Create a `config.yaml` configuration file that includes the appropriate settings: +After you have configured a super user, you can restrict the ability to create new users to the super user group. Set the `FEATURE_USER_CREATION` to `false` to restrict user creation. For example: -.config.yaml [source,yaml] ---- ... @@ -41,30 +47,53 @@ FEATURE_USER_CREATION: false ... ---- +[id="enabling-new-functionality-38"] +== Enabling new functionality in {productname} 3.8 -== Deploying the Operator using the initial configuration +To use new {productname} 3.8 functionality, enable some or all of the following features: -. Create a Secret using the configuration file -+ +[source,yaml] ---- -$ oc create secret generic --from-file config.yaml=./config.yaml init-config-bundle-secret +... +FEATURE_UI_V2: true +FEATURE_LISTEN_IP_VERSION: +FEATURE_SUPERUSERS_FULL_ACCESS: true +GLOBAL_READONLY_SUPER_USERS: + - +FEATURE_RESTRICTED_USERS: true +RESTRICTED_USERS_WHITELIST: + - +... ---- -. Create a QuayRegistry YAML file `quayregistry.yaml`, identifying the unmanaged components and also referencing the created Secret, for example: -+ -.quayregistry.yaml + +[id="enabling-new-functionality-37"] +== Enabling new functionality in {productname} 3.7 + +To use new {productname} 3.7 functionality, enable some or all of the following features: + [source,yaml] ---- -apiVersion: quay.redhat.com/v1 -kind: QuayRegistry -metadata: - name: example-registry - namespace: quay-enterprise -spec: - configBundleSecret: init-config-bundle-secret ----- -. Deploy the registry: -+ +... +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_BUILD_SUPPORT: true +FEATURE_PROXY_CACHE: true +FEATURE_STORAGE_REPLICATION: true +DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 102400000 +... ---- -$ oc create -f quayregistry.yaml + +[id="suggested-configuration-for-automation"] +== Suggested configuration for automation + +The following `config.yaml` parameters are suggested for automation: + +[source,yaml] ---- -. Create the first user, `quayadmin`, using the API +... +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- quayadmin +FEATURE_USER_CREATION: false +... 
+----
\ No newline at end of file
diff --git a/modules/config-ui-database.adoc b/modules/config-ui-database.adoc
index 9a87208c3..47457560b 100644
--- a/modules/config-ui-database.adoc
+++ b/modules/config-ui-database.adoc
@@ -6,7 +6,7 @@ image:ui-database-choice.png[Database choice]
 
 [NOTE]
 ====
-The MySQL and MariaDB databases have been deprecated as of {PRODUCTNAME} 3.6. Support for these databases will be removed in a future version of Red Hat Quay. If starting a new Red Hat Quay installation, it is strongly recommended to use PostgreSQL.
+The MySQL and MariaDB databases have been deprecated as of {productname} 3.6. Support for these databases will be removed in a future version of {productname}. If starting a new {productname} installation, it is strongly recommended to use PostgreSQL.
 ====
 
 == PostgreSQL configuration
diff --git a/modules/config-ui-intro.adoc b/modules/config-ui-intro.adoc
index 7aeae64ae..2dc0da197 100644
--- a/modules/config-ui-intro.adoc
+++ b/modules/config-ui-intro.adoc
@@ -1,2 +1,4 @@
+:_content-type: CONCEPT
+[id="config-using-tool"]
 = Using the configuration tool
 
diff --git a/modules/config-ui-storage-georepl.adoc b/modules/config-ui-storage-georepl.adoc
index 833ba0296..407fa72da 100644
--- a/modules/config-ui-storage-georepl.adoc
+++ b/modules/config-ui-storage-georepl.adoc
@@ -1,26 +1,35 @@
-[[config-ui-storage-georepl]]
+[id="enable-storage-replication-standalone"]
+
 = Enable storage replication - standalone Quay
 
-. Scroll down to the section
-entitled `Registry Storage`.
-. Click `Enable Storage Replication`.
-. Add each of the storage engines to which data will be replicated.
-All storage engines to be used must be listed.
-. If complete replication of all images to all storage engines is
-required, under each storage engine configuration click `Replicate to
-storage engine by default`. This will ensure that all images are
-replicated to that storage engine. To instead enable per-namespace
-replication, please contact support.
-. When you are done, click `Save Configuration Changes`.
-Configuration changes will take effect the next time {productname} restarts.
-
-. After adding storage and enabling "Replicate to storage engine by default" for Georeplications, you need to sync existing image data across all storage.
-To do this, you need to `oc exec` (or docker/kubectl exec) into the container
-and run:
+Use the following procedure to enable storage replication on {productname}.
+
+.Procedure
+
+. In your {productname} config editor, locate the *Registry Storage* section.
+
+. Click *Enable Storage Replication*.
+
+. Add each of the storage engines to which data will be replicated. All storage engines to be used must be listed.
+
+. If complete replication of all images to all storage engines is required, click *Replicate to storage engine by default* under each storage engine configuration. This ensures that all images are replicated to that storage engine.
++
+[NOTE]
+====
+To enable per-namespace replication, contact {productname} support.
+====
+
+. When finished, click *Save Configuration Changes*. The configuration changes will take effect after {productname} restarts.
+
+. After adding storage and enabling *Replicate to storage engine by default* for geo-replication, you must sync existing image data across all storage.
To do this, you must `oc exec` (alternatively, `docker exec` or `kubectl exec`) into the container and enter the following commands:
+
-```
+[source,terminal]
+----
 # scl enable python27 bash
 # python -m util.backfillreplication
-```
+----
+
+[NOTE]
+====
 This is a one-time operation to sync content after adding new storage.
+====
\ No newline at end of file
diff --git a/modules/config-updates-36.adoc b/modules/config-updates-36.adoc
index 40f12c7d7..adc473303 100644
--- a/modules/config-updates-36.adoc
+++ b/modules/config-updates-36.adoc
@@ -1,24 +1,35 @@
-[[config-updates-36]]
-= Configuration updates for Quay 3.6
+:_content-type: CONCEPT
+[id="config-updates-36"]
+= Configuration updates for {productname} 3.6
 
-== New configuration fields
-
-* **FEATURE_EXTENDED_REPOSITORY_NAMES:** Support for nested repositories and extended repository names has been added. This change allows the use of `/` in repository names needed for certain OpenShift Container Platform use cases. For more information, see xref:config-fields-nested-repositories[Configuring nested repositories]
-
-* **FEATURE_USER_INITIALIZE:** If set to true, the first User account may be created via API `/api/v1/user/initialize`. For more information, see xref:config-preconfigure-automation[Pre-configuring Quay for automation]
+[id="new-configuration-fields-36"]
+== New configuration fields
 
-* **ALLOWED_OCI_ARTIFACT_TYPES:** Helm, cosign, and ztsd compression scheme artifacts are built into {productname} {producty} by default. For any other OCI media type that is not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's `config.yaml` For more information, see xref:other-oci-artifacts-with-quay[Adding other OCI media types to Quay]
+The following configuration fields have been introduced with {productname} 3.6:
 
+[options="header"]
+|===
+|Parameter |Description
+|**FEATURE_EXTENDED_REPOSITORY_NAMES** |Support for nested repositories and extended repository names has been added. This change allows the use of `/` in repository names needed for certain {ocp} use cases. For more information, see xref:config-fields-nested-repositories[Configuring nested repositories].
+|**FEATURE_USER_INITIALIZE** |If set to true, the first `User` account can be created by the API `/api/v1/user/initialize`. For more information, see xref:config-preconfigure-automation[Pre-configuring {productname} for automation].
 
-* **CREATE_PRIVATE_REPO_ON_PUSH:** Registry users now have the option to set `CREATE_PRIVATE_REPO_ON_PUSH` in their config.yaml to `True` or `False` depending on their security needs.
+| **ALLOWED_OCI_ARTIFACT_TYPES** |Helm, cosign, and zstd compression scheme artifacts are built into {productname} 3.6 by default. For any other Open Container Initiative (OCI) media types that are not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's `config.yaml`. For more information, see xref:other-oci-artifacts-with-quay[Adding other OCI media types to Quay].
 
-* **CREATE_NAMESPACE_ON_PUSH:** Pushing to a non-existent organization can now be configured to automatically create the organization.
+| **CREATE_PRIVATE_REPO_ON_PUSH** |Registry users now have the option to set `CREATE_PRIVATE_REPO_ON_PUSH` in their `config.yaml` to `True` or `False` depending on their security needs.
+| **CREATE_NAMESPACE_ON_PUSH** |Pushing to a non-existent organization can now be configured to automatically create the organization.
+|===
+
+[id="deprecated-configuration-fields-36"]
+== Deprecated configuration fields
+
+The following configuration fields have been deprecated with {productname} 3.6:
 
-== Deprecated configuration fields
+[options="header"]
+|===
+|Parameter |Description
+| *FEATURE_HELM_OCI_SUPPORT* |This option has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their `config.yaml` files to enable support.
 
-* *FEATURE_HELM_OCI_SUPPORT*: This option has been deprecated and will be removed in a future version of {productname}. In {productname} {producty}, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support.
+|===
\ No newline at end of file
diff --git a/modules/config-updates-37.adoc b/modules/config-updates-37.adoc
new file mode 100644
index 000000000..96c38a426
--- /dev/null
+++ b/modules/config-updates-37.adoc
@@ -0,0 +1,35 @@
+:_content-type: CONCEPT
+[id="config-updates-37"]
+= Configuration updates for {productname} 3.7
+
+[id="new-configuration-fields-377"]
+== New configuration fields for {productname} 3.7.7
+
+[options="header"]
+|===
+
+|Field |Type |Description
+
+|**REPO_MIRROR_ROLLBACK** | Boolean | When set to `true`, the repository rolls back after a failed mirror attempt.
+
+*Default*: `false`
+
+|===
+
+
+[id="new-configuration-fields-37"]
+== New configuration fields
+
+The following configuration fields have been introduced with {productname} 3.7:
+
+[options="header"]
+|===
+
+|Parameter |Description
+| **FEATURE_QUOTA_MANAGEMENT** | Quota management is now supported. With this feature, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. For more information about quota management, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[{productname} Quota management and enforcement].
+
+| **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** |The quota size to apply to all organizations and users. For more information about quota management, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[{productname} Quota management and enforcement].
+
+| **FEATURE_PROXY_CACHE** |Using {productname} to proxy a remote organization is now supported. With this feature, {productname} will act as a proxy cache to circumvent pull-rate limitations from upstream registries. For more information about using {productname} as a proxy cache, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries].
\ No newline at end of file diff --git a/modules/config-updates-38.adoc new file mode 100644 index 000000000..19c4af700 --- /dev/null +++ b/modules/config-updates-38.adoc @@ -0,0 +1,39 @@ +:_content-type: CONCEPT +[id="config-updates-38"] += Configuration updates for {productname} 3.8 + +The following configuration fields have been introduced with {productname} 3.8: + +.{productname} 3.8 configuration fields +[cols="2a,1a,2a",options="header"] +|=== + +|Field | Type |Description +| xref:reference-miscellaneous-v2-ui[**FEATURE_UI_V2**] | Boolean | When set, allows users to try the beta UI environment. + +*Default:* `False` + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#proc_manage-ipv6-dual-stack[**FEATURE_LISTEN_IP_VERSION**] | String | Enables IPv4, IPv6, or dual-stack protocol family. This configuration field must be properly set, otherwise {productname} fails to start. + +*Default:* `IPv4` + +*Additional configurations:* `IPv6`, `dual-stack` + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-super-users-enabling[**LDAP_SUPERUSER_FILTER**] | String | Subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators to configure Lightweight Directory Access Protocol (LDAP) users as superusers when {productname} uses LDAP as its authentication provider. + +With this field, administrators can add or remove superusers without having to update the {productname} configuration file and restart their deployment. + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-restricted-users-enabling[**LDAP_RESTRICTED_USER_FILTER**] | String | Subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. + +| xref:configuring-superusers-full-access[**FEATURE_SUPERUSERS_FULL_ACCESS**] | Boolean | Grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for. + +*Default:* `False` + +| xref:configuring-global-readonly-super-users[**GLOBAL_READONLY_SUPER_USERS**] | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. + +| xref:configuring-feature-restricted-users[**FEATURE_RESTRICTED_USERS**] | Boolean | When set with `RESTRICTED_USERS_WHITELIST`, restricted users cannot create organizations or content in their own namespace. Normal permissions apply for an organization's membership, for example, a restricted user will still have normal permissions in organizations based on the teams that they are members of. + +*Default:* `False` + +| xref:configuring-restricted-users-whitelist[**RESTRICTED_USERS_WHITELIST**] | String | When set with `FEATURE_RESTRICTED_USERS: true`, specific users are excluded from the `FEATURE_RESTRICTED_USERS` setting. +|===
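For illustration, a `config.yaml` fragment combining several of the 3.8 fields might look like the following. The LDAP filter and user name are placeholder values, and the list form of `RESTRICTED_USERS_WHITELIST` is an assumption for illustration:

[source,yaml]
----
# Illustrative config.yaml fragment for the Quay 3.8 fields described above
FEATURE_UI_V2: true
FEATURE_LISTEN_IP_VERSION: dual-stack
LDAP_SUPERUSER_FILTER: "(memberOf=cn=quay-admins,ou=groups,dc=example,dc=com)"
FEATURE_SUPERUSERS_FULL_ACCESS: true
FEATURE_RESTRICTED_USERS: true
RESTRICTED_USERS_WHITELIST:
  - quayadmin
----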
diff --git a/modules/configuring-clair-disconnected-environment.adoc new file mode 100644 index 000000000..139d8d602 --- /dev/null +++ b/modules/configuring-clair-disconnected-environment.adoc @@ -0,0 +1,39 @@ +:_content-type: PROCEDURE +[id="configuring-clair-disconnected-environment"] += Configuring Clair for disconnected environments + +Use the following procedure to configure Clair for a disconnected environment. + +.Prerequisites + +* You have installed the `clairctl` tool, either as a standalone binary or by using the Clair container image. + +.Procedure + +. In your `config.yaml` file, set your Clair configuration to disable updaters from running: ++ +.config.yaml +[source,yaml] +---- +matcher: + disable_updaters: true +---- + +. Export the latest updater data to a local archive. The following command assumes that your Clair configuration is in `/etc/clairv4/config/config.yaml`: ++ +[subs="verbatim,attributes"] +---- +$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml export-updaters /updaters/updaters.gz +---- ++ +[NOTE] +==== +You must explicitly reference the Clair configuration. This creates the updater archive in `/etc/clairv4/updaters/updaters.gz`. To ensure that the archive was created without any errors from the source databases, you can use the `--strict` flag with `clairctl`. The archive file should be copied over to a volume that is accessible from the disconnected host running Clair. +==== + +. From the disconnected host, use the following command to import the archive into Clair: ++ +[subs="verbatim,attributes"] +---- +$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml import-updaters /updaters/updaters.gz +---- diff --git a/modules/configuring-custom-clair-database-managed.adoc new file mode 100644 index 000000000..28bb1d11e --- /dev/null +++ b/modules/configuring-custom-clair-database-managed.adoc @@ -0,0 +1,77 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="configuring-custom-clair-database-managed"] += Configuring a custom Clair database with a managed Clair configuration + +The {productname} Operator for {ocp} allows users to provide their own Clair database. + +Use the following procedure to create a custom Clair database. + +.Procedure + +. 
Create a Quay configuration bundle secret that includes the `clair-config.yaml` by entering the following command: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config.yaml config-bundle-secret +---- ++ +.Example Clair `config.yaml` file ++ +[source,yaml] +---- +indexer: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable + layer_scan_concurrency: 6 + migrations: true + scanlock_retry: 11 +log_level: debug +matcher: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable + migrations: true +metrics: + name: prometheus +notifier: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable + migrations: true +---- ++ +[NOTE] +==== +* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`. +* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. +==== + +. Add the `clair-config.yaml` file to your bundle secret, for example: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: config-bundle-secret + namespace: quay-enterprise +data: + config.yaml: + clair-config.yaml: +---- ++ +[NOTE] +==== +* When updated, the provided `clair-config.yaml` file is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module. +==== + +. You can check the status of your Clair pod by clicking the commit in the *Build History* page, or by running `oc get pods -n `. For example: ++ +---- +$ oc get pods -n +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- \ No newline at end of file diff --git a/modules/configuring-custom-clair-database.adoc new file mode 100644 index 000000000..8395574b5 --- /dev/null +++ b/modules/configuring-custom-clair-database.adoc @@ -0,0 +1,85 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="configuring-custom-clair-database"] += Configuring a custom Clair database with an unmanaged Clair configuration + +The {productname} Operator for {ocp} allows users to provide their own Clair database. + +Use the following procedure to create a custom Clair database. + +[NOTE] +==== +The following procedure sets up Clair with SSL/TLS certificates. To view a similar procedure that does not set up Clair with SSL/TLS certificates, see "Configuring a custom Clair database with a managed Clair configuration". +==== + +.Procedure + +. 
Create a Quay configuration bundle secret that includes the `clair-config.yaml` by entering the following command: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret +---- ++ +.Example Clair `config.yaml` file ++ +[source,yaml] +---- +indexer: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca + layer_scan_concurrency: 6 + migrations: true + scanlock_retry: 11 +log_level: debug +matcher: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca + migrations: true +metrics: + name: prometheus +notifier: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca + migrations: true +---- ++ +[NOTE] +==== +* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`. +* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. +==== + +. Add the `clair-config.yaml` file to your bundle secret, for example: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: config-bundle-secret + namespace: quay-enterprise +data: + config.yaml: + clair-config.yaml: + extra_ca_cert_: + clair-ssl.crt: >- + clair-ssl.key: >- +---- ++ +[NOTE] +==== +When updated, the provided `clair-config.yaml` file is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module. +==== + +. You can check the status of your Clair pod by clicking the commit in the *Build History* page, or by running `oc get pods -n `. 
For example: ++ +---- +$ oc get pods -n +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- \ No newline at end of file diff --git a/modules/content-distrib-intro.adoc b/modules/content-distrib-intro.adoc index c9f2e64be..e6242a880 100644 --- a/modules/content-distrib-intro.adoc +++ b/modules/content-distrib-intro.adoc @@ -1,10 +1,11 @@ -[[content-distrib-intro]] -= Content distribution +:_content-type: CONCEPT +[id="content-distrib-intro"] += Content distribution with {productname} Content distribution features in {productname} include: -* xref:mirroring-intro[Repository mirroring] +* xref:arch-mirroring-intro[Repository mirroring] * xref:georepl-intro[Geo-replication] -* xref:airgap-intro[Deployment in air-gapped environments] +* xref:arch-airgap-intro[Deployment in air-gapped environments] diff --git a/modules/core-distinct-registries.adoc b/modules/core-distinct-registries.adoc index 6181e010e..6f5444e70 100644 --- a/modules/core-distinct-registries.adoc +++ b/modules/core-distinct-registries.adoc @@ -1,28 +1,17 @@ -[[core-distinct-registries]] -= Single versus multiple registries +:_content-type: CONCEPT +[id="core-distinct-registries"] += Single compared to multiple registries -Many users consider running multiple, distinct registries while the preferred approach with Quay is to have a single, shared registry. The following table addresses the reasons why a user might want to run multiple registries and how these requirements are addressed in Quay: +Many users consider running multiple, distinct registries. The preferred approach with {productname} is to have a single, shared registry: -[cols="2a,2a",options="header"] -|=== -| Multiple registries | Quay approach -| Clear separation between Dev and Prod | Use organizations and repositories instead + RBAC -Clear separation by content origin + -(internal/external) | Use organizations and repositories instead + RBAC -Required to test registry upgrades given the criticality of the registry for running apps | -Quay Operator automates updates, both patch releases as well as minor or major updates that require an ordered sequence of steps to complete -| Separate registry in each datacenter (DC) | Quay can serve content to multiple physically close DCs + - + -HA can stretch across DCs (requires load balancers) + - + -Quay Geo-Replication can stretch across physically distant DCs (requires global load balancer or DNS-based geo-aware load-balancing) -| Separate registry for each cluster | Quay can serve content to thousands of clusters -| Scalability concerns over single registry | Quay scales nearly without limits + -(The underlying code base is proven to work at scale at Quay.io) -| Distinct registry configurations | In this scenario it might make sense to run two distinct registries -|=== +- If you want a clear separation between development and production images, or a clear separation by content origin, for example, keeping third-party images distinct from internal ones, you can use organizations and repositories, combined with role-based access control (RBAC), to achieve the desired separation. -**Recommendation:** +- Given that the image registry is a critical component in an enterprise environment, you may be tempted to use distinct deployments to test upgrades of the registry software to newer versions. The {productname} Operator updates the registry for patch releases as well as minor or major updates. 
This means that any complicated procedures are automated and, as a result, there is no requirement for you to provision multiple instances of the registry to test the upgrade. -Running a shared registry helps you to save storage, infrastructure and operational costs. -A dedicated registry would be really needed in very specific circumstances. +- With {productname}, there is no need to have a separate registry for each cluster you deploy. {productname} is proven to work at scale at link:https://quay.io[Quay.io], and can serve content to thousands of clusters. + +- Even if you have deployments in multiple data centers, you can still use a single {productname} instance to serve content to multiple physically close data centers, or use the HA functionality with load balancers to stretch across data centers. Alternatively, you can use the {productname} geo-replication feature to stretch across physically distant data centers. This requires the provisioning of a global load balancer or DNS-based geo-aware load balancing. + +- One scenario where it might be appropriate to run multiple distinct registries is when you want to specify a different configuration for each registry. + +In summary, running a shared registry helps you to save storage, infrastructure, and operational costs, but a dedicated registry might be needed in specific circumstances. diff --git a/modules/core-example-deployment.adoc index 1e51cb62a..161666078 100644 --- a/modules/core-example-deployment.adoc +++ b/modules/core-example-deployment.adoc @@ -1,12 +1,12 @@ -[[core-example-deployment]] -= {productname} example deployments +:_content-type: CONCEPT +[id="core-example-deployment"] += {productname} example deployments -The following image shows two {productname} example deployments: - -* Proof of concept, single node -* Highly available, multi-node in single data center -* Highly available, multi-node across multiple data centers +The following image shows three possible deployments for {productname}: +.Deployment examples image:178_Quay_architecture_0821_deployment_ex1.png[{productname} deployment example] - +Proof of Concept:: Running {productname}, Clair, and mirroring on a single node, with local image storage and local database +Single data center:: Running highly available {productname}, Clair, and mirroring, on multiple nodes, with HA database and image storage +Multiple data centers:: Running highly available {productname}, Clair, and mirroring, on multiple nodes in multiple data centers, with HA database and image storage \ No newline at end of file diff --git a/modules/core-infrastructure.adoc index e1f39ca02..8fe7f6561 100644 --- a/modules/core-infrastructure.adoc +++ b/modules/core-infrastructure.adoc @@ -1,48 +1,55 @@ -= Infrastructure -Quay runs on any physical or virtual infrastructure, both on-premise or public cloud. +:_content-type: CONCEPT +[id="arch-quay-infrastructure"] += {productname} infrastructure -* All-in-one setup on a developer laptop -* Highly available on Virtual Machines or on OpenShift -* Geographically dispersed setup across multiple availability zones and regions +{productname} runs on any physical or virtual infrastructure, on premise or on public cloud.
Deployments range from simple to massively scaled, such as the following: -== Running Quay on standalone hosts * All-in-one setup on a developer notebook * Highly available on virtual machines or on {ocp} * Geographically dispersed across multiple availability zones and regions -* Poof-of-concept deployment, where Quay runs on a machine with image storage and containerized database, Redis and optionally, Clair security scanning (scanning only works with object storage) -* Highly available setups running Quay and Clair in containers across multiple hosts, using `systemd` to ensure restart on failure/reboot -* High availability setups on standalone hosts require customer-provided load balancers, either low-level TCP load balancers or Application Load Balancers capable of terminating TLS [id="arch-quay-standalone-hosts"] +== Running {productname} on standalone hosts -Standalone deployment is a manual process, but it can easily be automated by the customer, for example, using Ansible. All standalone hosts require valid RHEL subscriptions. +You can automate the standalone deployment process by using Ansible or another automation suite. All standalone hosts require a valid {rhel} subscription. -== Running Quay on OpenShift +Proof of Concept deployment:: {productname} runs on a machine with image storage, containerized database, Redis, and optionally, Clair security scanning. -* Automated deployment and Day 2 management of Red Hat Quay with customization options -* Quay Operator can manage Quay and all dependencies -* Automated scaling and updates -* Integration with existing OpenShift processes like GitOps, monitoring, alerting, logging -* Can provide out-of-the-box object storage with limited availability, backed by the Multi-Cloud Object Gateway (NooBaa), as part of the ODF Operator (no additional subscription required) -* Can leverage scale-out, high availability object storage provided by the ODF Operator (additional subscription required) +Highly available setups:: {productname} and Clair run in containers across multiple hosts. You can use `systemd` units to ensure restart on failure or reboot, as shown in the sketch that follows. ++ +High availability setups on standalone hosts require customer-provided load balancers, either low-level TCP load balancers or application load balancers, capable of terminating TLS.
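For example, a minimal `systemd` unit for running the {productname} container on a standalone host might look like the following. The unit name, config path, and published ports are illustrative assumptions, not prescribed values:

[subs="verbatim,attributes"]
----
# /etc/systemd/system/quay.service (illustrative path)
[Unit]
Description=Red Hat Quay container
Wants=network-online.target
After=network-online.target

[Service]
Restart=on-failure
# Remove any leftover container from a previous run; "-" ignores failures
ExecStartPre=-/usr/bin/podman rm -f quay
ExecStart=/usr/bin/podman run --rm --name=quay \
    -p 80:8080 -p 443:8443 \
    -v /opt/quay/config:/conf/stack:Z \
    {productrepo}/{quayimage}:{productminv}
ExecStop=/usr/bin/podman stop -t 10 quay

[Install]
WantedBy=multi-user.target
----

Enable the unit with `sudo systemctl enable --now quay.service`; `Restart=on-failure` provides the restart-on-failure behavior described above.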
-Quay can run on OpenShift infra nodes, meaning no further subscriptions are required. [id="arch-quay-openshift"] == Running {productname} on OpenShift -== Running Quay outside OpenShift +The {productname} Operator for {ocp} provides the following features: -While the Quay Operator ensures seamless deployment and management of Quay running on OpenShift, it is also possible to run Quay in standalone mode (see above) and then serve content to one or many OpenShift clusters, wherever they are running. +* Automated deployment and management of {productname} with customization options +* Management of {productname} and all of its dependencies +* Automated scaling and updates +* Integration with existing {ocp} processes like GitOps, monitoring, alerting, logging +* Provision of object storage with limited availability, backed by the multi-cloud object gateway (NooBaa), as part of the Red Hat OpenShift Data Foundation (ODF) Operator. This service does not require an additional subscription. +* Scaled-out, high availability object storage provided by the ODF Operator. This service requires an additional subscription. -image:178_Quay_architecture_0821_deployment_ex2.png[Quay outside OpenShift] +{productname} can run on {ocp} infrastructure nodes. As a result, no further subscriptions are required. Running {productname} on {ocp} has the following benefits: -A number of operators are available to help integrate standalone Quay with OpenShift: +* **Zero to Hero:** Simplified deployment of {productname} and associated components means that you can start using the product immediately +* **Scalability:** Use cluster compute capacity to manage demand through automated scaling, based on actual load +* **Simplified Networking:** Automated provisioning of load balancers and traffic ingress secured through HTTPS using {ocp} TLS certificates and Routes +* **Declarative configuration management:** Configurations stored in CustomResource objects for GitOps-friendly lifecycle management +* **Repeatability:** Consistency regardless of the number of replicas of {productname} and Clair +* **OpenShift integration:** Additional services to use {ocp} Monitoring and Alerting facilities to manage multiple {productname} deployments on a single cluster -* **Quay Cluster Security Operator:** relays Quay vulnerability scanning results into the OpenShift Console -* **Quay Bridge Operator:** ensures seamless integration and user experience for using Quay with OpenShift in conjunction with OpenShift Builds and ImageStreams [id="arch-integrating-standalone-ocp"] == Integrating standalone {productname} with {ocp} +While the {productname} Operator ensures seamless deployment and management of {productname} running on {ocp}, it is also possible to run {productname} in standalone mode and then serve content to one or many {ocp} clusters, wherever they are running. -== Benefits of running Quay on OpenShift +.Integrating standalone {productname} with {ocp} +image:178_Quay_architecture_0821_deployment_ex2.png[Integrating standalone {productname} with {ocp}] -* **Zero to Hero:** Simplified deployment of Quay and associated components means that you can start using the product immediately -* **Scalability:** Leverage cluster compute capacity to manage demand via automated scaling, based on actual load -* **Simplified Networking:** Automated provisioning of load balancers and traffic ingress secured via HTTPS using OpenShift TLS certificates and Routes -* **Declarative configuration management:** Configurations stored in in CustomResource objects for GitOps-friendly lifecycle management -* **Repeatability:** Consistency regardless of the number of replicas of Quay / Clair -* **OpenShift integration:** Additional services to leverage OpenShift Monitoring and Alerting facilities to manage multiple Quay deployments on a single cluster +Several Operators are available to help integrate standalone and Operator-based deployments of {productname} with {ocp}, such as the following: +{productname} Cluster Security Operator:: Relays {productname} vulnerability scanning results into the {ocp} console +{productname} Bridge Operator:: Ensures seamless integration and user experience when using {productname} with {ocp} in conjunction with {ocp} Builds and ImageStreams \ No newline at end of file diff --git a/modules/core-prereqs-db.adoc index 5f786f068..d77c38263 100644 --- a/modules/core-prereqs-db.adoc +++ b/modules/core-prereqs-db.adoc @@ -1,16 +1,11 @@ -[[core-prereqs-db]] +:_content-type: CONCEPT +[id="arch-core-prereqs-db"] = Database backend -Quay stores most of its configuration and all metadata and logs inside its
database backend. Logs can be pushed into ElasticSearch instead +{productname} stores all of its configuration information in the `config.yaml` file. Registry metadata, for example, user information, robot accounts, teams, permissions, organizations, images, tags, and manifests, is stored inside of the database backend. Logs can be pushed to Elasticsearch if required. PostgreSQL is the preferred database backend because it can be used for both {productname} and Clair. -PostgreSQL is the preferred database backend since it can be used for both Quay and Clair - -Quay works fine with MySQL too (5.7+) but Clair requires PostgreSQL - -Quay HA requires an HA database setup - -If Quay is running on public cloud infrastructure, we recommend the use of the PostgreSQL services provided by your cloud provider. - -Geo-replication requires a single, shared database that is accessible from all regions +A future version of {productname} will remove support for using MySQL and MariaDB as the database backend, which has been deprecated since the {productname} 3.6 release. Until then, MySQL is still supported according to the link:https://access.redhat.com/articles/4067991[support matrix], but will not receive additional features or explicit testing coverage. The {productname} Operator supports only PostgreSQL deployments when the database is managed. If you want to use MySQL, you must deploy it manually and set the database component to `managed: false`. +Deploying {productname} in a highly available (HA) configuration requires that your database services are provisioned for high availability. If {productname} is running on public cloud infrastructure, it is recommended that you use the PostgreSQL services provided by your cloud provider; however, MySQL is also supported. +Geo-replication requires a single, shared database that is accessible from all regions. diff --git a/modules/core-prereqs-redis.adoc index 0e306e500..ab24591aa 100644 --- a/modules/core-prereqs-redis.adoc +++ b/modules/core-prereqs-redis.adoc @@ -1,6 +1,9 @@ -[[core-prereqs-redis]] +:_content-type: CONCEPT +[id="core-prereqs-redis"] = Redis -Quay stores builder logs inside a Redis cache. The data stored is ephemeral in nature and as such, Redis does not need to be HA even though it is stateful. if Redis does fail, you will only lose access to build logs. +{productname} stores builder logs inside a Redis cache. Because the data stored is ephemeral, Redis does not need to be highly available even though it is stateful. -You can use a Redis image from the Red Hat Software Collections or from any other source you prefer. +If Redis fails, you will lose access to build logs, builders, and the garbage collector service. Additionally, user events will be unavailable. + +You can use a Redis image from the Red Hat Software Collections or from any other source you prefer. diff --git a/modules/core-prereqs-storage.adoc index 0f49ec60d..97713f99b 100644 --- a/modules/core-prereqs-storage.adoc +++ b/modules/core-prereqs-storage.adoc @@ -1,30 +1,31 @@ -[[core-prereqs-storage]] +:_content-type: CONCEPT +[id="core-prereqs-storage"] = Image storage backend +{productname} stores all binary blobs in its storage backend. -{productname} Quay stores all binary blobs in its storage backend.
The following conditions apply to image storage: +Local storage:: {productname} can work with local storage; however, this should only be used for proof of concept or test setups, as the durability of the binary blobs cannot be guaranteed. +HA storage setup:: For a {productname} HA deployment, you must provide HA image storage, for example: ++ +- **Red Hat OpenShift Data Foundation**, previously known as Red Hat OpenShift Container Storage, is software-defined storage for containers. Engineered as the data and storage services platform for {ocp}, Red Hat OpenShift Data Foundation helps teams develop and deploy applications quickly and efficiently across clouds. More information can be found at link:https://www.redhat.com/en/technologies/cloud-computing/openshift-data-foundation[]. +- **Ceph Object Gateway** (also called RADOS Gateway) is an example of a storage solution that can provide the object storage needed by {productname}. +Detailed instructions on how to use Ceph storage as a highly available storage backend can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_-_high_availability/preparing_for_red_hat_quay_high_availability#set_up_ceph[Quay High Availability Guide]. +Further information about Red Hat Ceph Storage and HA setups can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/pdf/architecture_guide/Red_Hat_Ceph_Storage-3-Architecture_Guide-en-US.pdf[Red Hat Ceph Storage Architecture Guide]. -* Local storage and NFS should only be used for PoC / test setups -* Quay HA requires an HA storage setup -* Geo-replication requires object storage and does not work with local storage +Geo-replication:: Local storage cannot be used for geo-replication, so a supported on premise or cloud-based object storage solution must be deployed. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the {productname} instance, and will then be replicated, in the background, to the other storage engines. This requires the image storage to be accessible from all regions. +[id="arch-supported-image-storage-types"] +== Supported image storage engines -== Supported on-premise storage types +{productname} supports the following on premise storage types: -{productname} Quay supports the following on-premise storage types: - -* Ceph Rados RGW +* Ceph/Rados RGW * OpenStack Swift -* RHODF 4 (via NooBaa) -// TODO 36 Is RHOCS 3 supported? 
-// * RHOCS 3 (via NooBaa) - - -== Supported public cloud storage types +* Red Hat OpenShift Data Foundation 4 (through NooBaa) -{productname} Quay supports the following public cloud storage types: +{productname} supports the following public cloud storage engines: -* AWS S3 +* Amazon Web Services (AWS) S3 * Google Cloud Storage * Azure Blob Storage diff --git a/modules/core-prereqs.adoc deleted file mode 100644 index 8349fa758..000000000 --- a/modules/core-prereqs.adoc +++ /dev/null @@ -1,10 +0,0 @@ -[[core-prereqs]] -= Quay prerequisites - -Before deploying Quay, you will need to provision the following: - -* xref:core-prereqs-storage[Image storage] -* xref:core-prereqs-db[Database] -* xref:core-prereqs-redis[Redis] - - diff --git a/modules/core-sample-quay-on-prem.adoc index 6ae69e54b..bd9594ad0 100644 --- a/modules/core-sample-quay-on-prem.adoc +++ b/modules/core-sample-quay-on-prem.adoc @@ -1,10 +1,12 @@ -[[sample-quay-on-prem-intro]] -= Sample {productname} on-premise configuration +:_content-type: CONCEPT +[id="sample-quay-on-prem-intro"] += Deploying {productname} on premise -The following image shows examples for on-premise configuration, including: +The following image shows examples of on premise configurations for the following types of deployments: -* Standalone proof of concept +* Standalone Proof of Concept * Highly available deployment on multiple hosts -* Deployment on OpenShift cluster using Operator +* Deployment on an {ocp} cluster by using the {productname} Operator -image:178_Quay_architecture_0821_on-premises_config.png[On-prem sample configuration] +.On premise example configurations +image:178_Quay_architecture_0821_on-premises_config.png[On premise example configuration] diff --git a/modules/custom-clair-configuration-managed-database.adoc new file mode 100644 index 000000000..34f518776 --- /dev/null +++ b/modules/custom-clair-configuration-managed-database.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="custom-clair-configuration-managed-database"] += Running a custom Clair configuration with a managed Clair database + +In some cases, users might want to run a custom Clair configuration with a managed Clair database. This is useful in the following scenarios: + +* When a user wants to disable specific updater resources. +* When a user is running {productname} in a disconnected environment. For more information about running Clair in a disconnected environment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#clair-openshift-airgap-database[Configuring access to the Clair database in the air-gapped OpenShift cluster]. ++ +[NOTE] +==== +* If you are running {productname} in a disconnected environment, the `airgap` parameter of your `clair-config.yaml` must be set to `true`. +* If you are running {productname} in a disconnected environment, you should disable all updater components. +====
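For illustration, a `clair-config.yaml` fragment for a disconnected deployment might disable updaters and set the air gap flag. The placement of `airgap` under the `indexer` section is an assumption based on the Clair configuration reference; verify it against your Clair version:

[source,yaml]
----
indexer:
  airgap: true            # assumption: air-gap flag for the indexer
matcher:
  disable_updaters: true  # disable the updater components
----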
diff --git a/modules/deploying-the-operator-using-initial-configuration.adoc new file mode 100644 index 000000000..3306fd5b3 --- /dev/null +++ b/modules/deploying-the-operator-using-initial-configuration.adoc @@ -0,0 +1,43 @@ +:_content-type: PROCEDURE +[id="deploying-the-operator-using-initial-configuration"] += Deploying the {productname} Operator using the initial configuration + +Use the following procedure to deploy {productname} on {ocp} using the initial configuration. + +.Prerequisites + +* You have installed the `oc` CLI. + +.Procedure + +. Create a secret using the configuration file: ++ +[source,terminal] +---- +$ oc create secret generic -n quay-enterprise --from-file config.yaml=./config.yaml init-config-bundle-secret +---- + +. Create a `quayregistry.yaml` file. Identify the unmanaged components and reference the created secret, for example: ++ + +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: init-config-bundle-secret +---- + +. Deploy the {productname} registry: ++ +[source,terminal] +---- +$ oc create -n quay-enterprise -f quayregistry.yaml +---- + +.Next Steps + +* xref:using-the-api-to-create-first-user[Using the API to create the first user] diff --git a/modules/deployment-topology-with-storage-proxy.adoc index 431216368..5e5f78717 100644 --- a/modules/deployment-topology-with-storage-proxy.adoc +++ b/modules/deployment-topology-with-storage-proxy.adoc @@ -1,8 +1,10 @@ -[[deployment-topology-with-storage-proxy]] -= {productname} deployment topology with storage proxy +:_content-type: CONCEPT +[id="deployment-topology-with-storage-proxy"] += {productname} deployment topology with storage proxy -The following image provides a high level overview of a {productname} deployment topology with a storage proxy configured. +The following image provides a high level overview of a {productname} deployment topology with storage proxy configured: +.{productname} deployment topology with storage proxy image:178_Quay_architecture_0821_deploy_topology_storage.png[{productname} deployment topology with storage proxy] -With a storage proxy configured, all traffic passes through the public Quay endpoint. +With storage proxy configured, all traffic passes through the public {productname} endpoint. \ No newline at end of file diff --git a/modules/deployment-topology.adoc index 96dbb2832..571f1bdf7 100644 --- a/modules/deployment-topology.adoc +++ b/modules/deployment-topology.adoc @@ -1,8 +1,10 @@ -[[deployment-topology]] +:_content-type: CONCEPT +[id="deployment-topology"] = {productname} deployment topology -The following image provides a high level overview of a {productname} deployment topology. +The following image provides a high level overview of a {productname} deployment topology: +.{productname} deployment topology image:178_Quay_architecture_0821_deploy_topology.png[{productname} deployment topology] -In this deployment, all pushes, UI, and API requests come in via public Quay endpoints. Pulls are served directly from `object storage`. +In this deployment, all pushes, user interface, and API requests are received by public {productname} endpoints. Pulls are served directly from `object storage`. 
\ No newline at end of file diff --git a/modules/downgrade-quay-deployment.adoc new file mode 100644 index 000000000..63934f2bf --- /dev/null +++ b/modules/downgrade-quay-deployment.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT + +[id="downgrade-quay-deployment"] += Downgrading {productname} + +{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, 3.7.2 -> 3.7.1. Rolling back to previous y-stream versions (3.7.0 -> 3.6.0) is not supported. This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. Database schema upgrades are not considered backwards compatible. + +[IMPORTANT] +==== +Downgrading to previous z-streams is neither recommended nor supported for either Operator-based deployments or virtual machine-based deployments. Downgrading should only be done in extreme circumstances. The decision to roll back your {productname} deployment must be made in conjunction with the {productname} support and development teams. For more information, contact {productname} support. +==== \ No newline at end of file diff --git a/modules/external-registry-config-api-example.adoc new file mode 100644 index 000000000..a387f825e --- /dev/null +++ b/modules/external-registry-config-api-example.adoc @@ -0,0 +1,25 @@ +:_content-type: CONCEPT +[id="external-registry-config-api-example"] + +=== external_registry_config object reference + +[source,python] +---- +{ + "is_enabled": True, + "external_reference": "quay.io/redhat/quay", + "sync_interval": 5000, + "sync_start_date": datetime(2020, 0o1, 0o2, 6, 30, 0), + "external_registry_username": "fakeUsername", + "external_registry_password": "fakePassword", + "external_registry_config": { + "verify_tls": True, + "unsigned_images": False, + "proxy": { + "http_proxy": "http://insecure.proxy.corp", + "https_proxy": "https://secure.proxy.corp", + "no_proxy": "mylocalhost", + }, + }, + } +---- \ No newline at end of file diff --git a/modules/fine-grained-access-control-intro.adoc index 7645b0e0a..a88fe0c8d 100644 --- a/modules/fine-grained-access-control-intro.adoc +++ b/modules/fine-grained-access-control-intro.adoc @@ -1,13 +1,11 @@ [[fine-grained-access-control]] -= Fine Grained Access Control += Fine-grained access control {productname} allows users to integrate their existing identity infrastructure and use a fine-grained permissions system to map their organizational structure and grant access to whole teams to manage specific repositories. {productname} supports the following authentication providers: -* Built-in Database Authentication +* Built-in database authentication * Lightweight Directory Access Protocol (LDAP) authentication and sync * External OpenID Connect (OIDC) provider * OpenStack Keystone - -// Wondering if we could cut the following sections and refer to material already in the docs. For example, we could use the above bullet points on LDAP and link to https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/ldap-authentication-setup-for-quay-enterpriseI just copied what was in the tech deck. 
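As an illustration of one of these providers, enabling LDAP authentication involves `config.yaml` fields such as the following. This is a sketch based on the standard {productname} LDAP configuration fields; the server address, distinguished names, and filter are placeholder values:

[source,yaml]
----
AUTHENTICATION_TYPE: LDAP
LDAP_URI: ldap://ldap.example.com
LDAP_BASE_DN:
  - dc=example
  - dc=com
LDAP_USER_RDN:
  - ou=users
LDAP_ADMIN_DN: uid=quayadmin,ou=users,dc=example,dc=com
LDAP_ADMIN_PASSWD: changeme
LDAP_USER_FILTER: "(memberOf=cn=quay-users,ou=groups,dc=example,dc=com)"
----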
diff --git a/modules/fips-overview.adoc index 7dbdbec6d..666f86408 100644 --- a/modules/fips-overview.adoc +++ b/modules/fips-overview.adoc @@ -1,9 +1,11 @@ -[[fips-overview]] -= FIPS readiness and compliance +// Module included in the following assemblies: +// +// clair/master.adoc -FIPS (the Federal Information Processing Standard developed by the National Institute of Standards and Technology, NIST) is regarded as the gold standard for securing and encrypting sensitive data, particularly in heavily regulated areas such as banking, healthcare and the public sector. Red Hat Enterprise Linux and Red Hat OpenShift Container Platform support this standard by providing a FIPS mode in which the system would only allow usage of certain, FIPS-validated cryptographic modules, like `openssl`. This ensures FIPS compliance. - - -{productname} supports running on RHEL and OCP in FIPS mode in production since version 3.5. Furthermore, {productname} itself also commits to exclusively using cryptography libraries that are validated or are in the process of being validated by NIST. {productname} 3.5 has pending FIPS 140-2 validation based on the RHEL 8.3 cryptography libraries. As soon as that validation is finalized, {productname} will be officially FIPS compliant. +:_content-type: CONCEPT +[id="fips-overview"] += Federal Information Processing Standard (FIPS) readiness and compliance +The Federal Information Processing Standard (FIPS) developed by the National Institute of Standards and Technology (NIST) is regarded as the gold standard for securing and encrypting sensitive data, notably in highly regulated areas such as banking, healthcare, and the public sector. {rhel} and {ocp} support the FIPS standard by providing a _FIPS mode_, in which the system only allows usage of specific FIPS-validated cryptographic modules like `openssl`. This ensures FIPS compliance. +{productname} supports running on FIPS-enabled {rhel} and {ocp} environments from {productname} version 3.5.0. diff --git a/modules/garbage-collection.adoc new file mode 100644 index 000000000..1d327194e --- /dev/null +++ b/modules/garbage-collection.adoc @@ -0,0 +1,176 @@ +:_content-type: CONCEPT +[id="garbage-collection"] += {productname} garbage collection + +{productname} includes automatic and continuous image garbage collection. Garbage collection ensures efficient use of resources for active objects by removing objects that occupy sizeable amounts of disk space, such as dangling or untagged images, repositories, and blobs, including layers and manifests. Garbage collection performed by {productname} can reduce downtime in your organization's environment. + +[id="garbage-collection-practice"] +== {productname} garbage collection in practice + +Currently, all garbage collection happens discreetly, and there are no commands to manually run garbage collection. {productname} provides metrics that track the status of the different garbage collection workers. + +For namespace and repository garbage collection, the progress is tracked based on the size of their respective queues. Namespace and repository garbage collection workers require a global lock to work. As a result, and for performance reasons, only one worker runs at a time. + +[NOTE] +==== +{productname} shares blobs between namespaces and repositories in order to conserve disk space. For example, if the same image is pushed 10 times, only one copy of that image will be stored.
It is possible that tags can share their layers with different images already stored somewhere in {productname}. In that case, blobs will stay in storage, because deleting shared blobs would make other images unusable. + +Blob expiration is independent of the time machine. If you push a tag to {productname} and the time machine is set to 0 seconds, and then you delete a tag immediately, garbage collection deletes the tag and everything related to that tag, but will not delete the blob storage until the blob expiration time is reached. +==== + +Garbage collecting tagged images works differently than garbage collection on namespaces or repositories. Rather than having a queue of items to work with, the garbage collection workers for tagged images actively search for a repository with inactive or expired tags to clean up. Each instance of garbage collection workers will grab a repository lock, which results in one worker per repository. + +[NOTE] +==== +* In {productname}, inactive or expired tags are manifests without tags because the last tag was deleted or it expired. The manifest stores information about how the image is composed and stored in the database for each individual tag. When a tag is deleted and the allotted time from *Time Machine* has been met, {productname} garbage collects the blobs that are not connected to any other manifests in the registry. If a particular blob is connected to a manifest, then it is preserved in storage and only its connection to the manifest that is being deleted is removed. +* Expired images will disappear after the allotted time, but are still stored in {productname}. The time in which an image is completely deleted, or collected, depends on the *Time Machine* setting of your organization. The default time for garbage collection is 14 days unless otherwise specified. Until that time, tags can still point to expired or deleted images. +==== + +For each type of garbage collection, {productname} provides metrics for the number of rows per table deleted by each garbage collection worker. The following image shows an example of how {productname} monitors garbage collection with the same metrics: + +image:garbage-collection-metrics.png[Garbage collection metrics] + +[id="measuring-storage-reclamation"] +=== Measuring storage reclamation + +{productname} does not have a way to track how much space is freed up by garbage collection. Currently, the best indicator is to check how many blobs have been deleted in the provided metrics. + +[NOTE] +==== +The `UploadedBlob` table in the {productname} metrics tracks the various blobs that are associated with a repository. When a blob is uploaded, it will not be garbage collected before the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter. This is to avoid prematurely deleting blobs that are part of an ongoing push. For example, if garbage collection is set to run often, and a tag is deleted in the span of less than one hour, then it is possible that the associated blobs will not get cleaned up immediately. Instead, and assuming that the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter has passed, the associated blobs will be removed the next time garbage collection runs on that same repository.
+==== + +[id="garbage-collection-configuration-fields"] +== Garbage collection configuration fields + +The following configuration fields are available to customize what is garbage collected, and the frequency at which garbage collection occurs: + +[cols="3a,1a,2a",options="header"] +|=== +|Name |Description |Schema +| **FEATURE_GARBAGE_COLLECTION** | Whether garbage collection is enabled for image tags. Defaults to `true`. |Boolean +| **FEATURE_NAMESPACE_GARBAGE_COLLECTION** | Whether garbage collection is enabled for namespaces. Defaults to `true`. |Boolean +| **FEATURE_REPOSITORY_GARBAGE_COLLECTION** | Whether garbage collection is enabled for repositories. Defaults to `true`. |Boolean +| **GARBAGE_COLLECTION_FREQUENCY** | The frequency, in seconds, at which the garbage collection worker runs. Affects only garbage collection workers. Defaults to 30 seconds. |String +| **PUSH_TEMP_TAG_EXPIRATION_SEC** | The number of seconds that blobs will not be garbage collected after being uploaded. This feature prevents garbage collection from cleaning up blobs that are not referenced yet, but still used as part of an ongoing push. |String +| **TAG_EXPIRATION_OPTIONS** | List of valid tag expiration values. |String +| **DEFAULT_TAG_EXPIRATION** | Tag expiration time for time machine. |String +| **CLEAN_BLOB_UPLOAD_FOLDER** | Automatically cleans stale blobs left over from an S3 multipart upload. By default, blob files older than two days are cleaned up every hour. + +**Default:** `true` | Boolean + +|===
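For example, a `config.yaml` fragment tuning garbage collection might look like the following; the values shown are illustrative rather than recommended settings:

[source,yaml]
----
FEATURE_GARBAGE_COLLECTION: true
FEATURE_NAMESPACE_GARBAGE_COLLECTION: true
FEATURE_REPOSITORY_GARBAGE_COLLECTION: true
GARBAGE_COLLECTION_FREQUENCY: "30"    # run workers every 30 seconds
PUSH_TEMP_TAG_EXPIRATION_SEC: "3600"  # protect freshly pushed blobs for one hour
TAG_EXPIRATION_OPTIONS:
  - 1d
  - 1w
  - 2w
DEFAULT_TAG_EXPIRATION: 2w
CLEAN_BLOB_UPLOAD_FOLDER: true
----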
+ +[id="disabling-garbage-collection"] +== Disabling garbage collection + +The garbage collection features for image tags, namespaces, and repositories are stored in the `config.yaml` file. These features default to `true`. + +In rare cases, you might want to disable garbage collection, for example, to control when garbage collection is performed. You can disable garbage collection by setting the `GARBAGE_COLLECTION` features to `false`. When disabled, dangling or untagged images, repositories, namespaces, layers, and manifests are not removed. This might increase the downtime of your environment. + + +[NOTE] +==== +There is no command to manually run garbage collection. Instead, you would disable, and then re-enable, the garbage collection feature. +==== + +[id="garbage-collection-quota-management"] +== Garbage collection and quota management + +{productname} introduced quota management in 3.7. With quota management, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. + +As of {productname} 3.7, garbage collection reclaims the storage space that was allocated to images, repositories, and blobs after deletion. Because the garbage collection feature reclaims storage space after deletion, there is a discrepancy between what is stored in an environment's disk space and what quota management is reporting as the total consumption. There is currently no workaround for this issue. + +[id="garbage-collection-procedure"] +== Garbage collection in practice + +Use the following procedure to check your {productname} logs to ensure that garbage collection is working. + +.Procedure + +. Enter the following command to ensure that garbage collection is properly working: ++ +[source,terminal] +---- +$ sudo podman logs +---- ++ +Example output: ++ +[source,terminal] +---- +gcworker stdout | 2022-11-14 18:46:52,458 [63] [INFO] [apscheduler.executors.default] Job "GarbageCollectionWorker._garbage_collection_repos (trigger: interval[0:00:30], next run at: 2022-11-14 18:47:22 UTC)" executed successfully +---- + +. Delete an image tag. + +. Enter the following command to ensure that the tag was deleted: ++ +[source,terminal] +---- +$ podman logs quay-app +---- ++ +Example output: ++ +[source,terminal] +---- +gunicorn-web stdout | 2022-11-14 19:23:44,574 [233] [INFO] [gunicorn.access] 192.168.0.38 - - [14/Nov/2022:19:23:44 +0000] "DELETE /api/v1/repository/quayadmin/busybox/tag/test HTTP/1.0" 204 0 "http://quay-server.example.com/repository/quayadmin/busybox?tab=tags" "Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0" +---- + +[id="garbage-collection-metrics"] +== {productname} garbage collection metrics + +The following metrics show how many resources have been removed by garbage collection. These metrics show how many times the garbage collection workers have run and how many namespaces, repositories, and blobs were removed. + +[options="header"] +|=== +| Metric name | Description +| quay_gc_iterations_total | Number of iterations by the GCWorker +| quay_gc_namespaces_purged_total | Number of namespaces purged by the NamespaceGCWorker +| quay_gc_repos_purged_total | Number of repositories purged by the RepositoryGCWorker or NamespaceGCWorker +| quay_gc_storage_blobs_deleted_total | Number of storage blobs deleted +|=== + + +.Sample metrics output +[source,terminal] +---- +# TYPE quay_gc_iterations_created gauge +quay_gc_iterations_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189714e+09 +... + +# HELP quay_gc_iterations_total number of iterations by the GCWorker +# TYPE quay_gc_iterations_total counter +quay_gc_iterations_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... + +# TYPE quay_gc_namespaces_purged_created gauge +quay_gc_namespaces_purged_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189433e+09 +... + +# HELP quay_gc_namespaces_purged_total number of namespaces purged by the NamespaceGCWorker +# TYPE quay_gc_namespaces_purged_total counter +quay_gc_namespaces_purged_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +.... + +# TYPE quay_gc_repos_purged_created gauge +quay_gc_repos_purged_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.631782319018925e+09 +... + +# HELP quay_gc_repos_purged_total number of repositories purged by the RepositoryGCWorker or NamespaceGCWorker +# TYPE quay_gc_repos_purged_total counter +quay_gc_repos_purged_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... + +# TYPE quay_gc_storage_blobs_deleted_created gauge +quay_gc_storage_blobs_deleted_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189059e+09 +... 
+ + # HELP quay_gc_storage_blobs_deleted_total number of storage blobs deleted # TYPE quay_gc_storage_blobs_deleted_total counter quay_gc_storage_blobs_deleted_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... +---- \ No newline at end of file diff --git a/modules/georepl-arch-operator.adoc index 0da61e842..de321a0c0 100644 --- a/modules/georepl-arch-operator.adoc +++ b/modules/georepl-arch-operator.adoc @@ -1,8 +1,8 @@ [[georepl-arch-operator]] -= Geo-replication - Quay Operator += Geo-replication using the {productname} Operator -== Geo-replication architecture - Quay Operator +image:178_Quay_architecture_0821_georeplication_openshift-temp.png[Geo-replication architecture] -image:178_Quay_architecture_0821_georeplication_openshift.png[Georeplication architecture] +In the example shown above, the {productname} Operator is deployed in two separate regions, with a common database and a common Redis instance. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the {productname} instance, and will then be replicated, in the background, to the other storage engines. -In the example shown above, Quay Operator is deployed in two separate regions, with a common database and a common Redis instance. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the Quay instance, and will then be replicated, in the background, to the other storage engines. +Because the Operator now manages the Clair security scanner and its database separately, geo-replication setups can be configured so that the Operator does not manage the Clair database. Instead, an external shared database would be used. {productname} and Clair support several providers and vendors of PostgreSQL, which can be found in the {productname} 3.x link:https://access.redhat.com/articles/4067991[test matrix]. Additionally, the Operator also supports custom Clair configurations that can be injected into the deployment, which allows users to configure Clair with the connection credentials for the external database. diff --git a/modules/georepl-arch-standalone.adoc index 094f324e7..91697f9a3 100644 --- a/modules/georepl-arch-standalone.adoc +++ b/modules/georepl-arch-standalone.adoc @@ -1,8 +1,13 @@ -[[georepl-arch-standalone]] -= Geo-replication - standalone Quay +:_content-type: CONCEPT +[id="georepl-arch-standalone"] += Geo-replication using standalone {productname} -== Geo-replication architecture - standalone Quay +In the following image, {productname} is running standalone in two separate regions, with a common database and a common Redis instance. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the {productname} instance, and will then be replicated, in the background, to the other storage engines. -image:178_Quay_architecture_0821_georeplication.png[Georeplication] +[NOTE] +==== +If Clair fails in one cluster, for example, the US cluster, US users would not see vulnerability reports in {productname} for the second cluster (EU). This is because all Clair instances have the same state.
When Clair fails, it is usually because of a problem within the cluster. +==== -In the example shown above, Quay is running standalone in two separate regions, with a common database and a common Redis instance. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the Quay instance, and will then be replicated, in the background, to the other storage engines. +.Geo-replication architecture +image:178_Quay_architecture_0821_georeplication.png[Geo-replication] \ No newline at end of file diff --git a/modules/georepl-arch.adoc b/modules/georepl-arch.adoc new file mode 100644 index 000000000..f8e24c53c --- /dev/null +++ b/modules/georepl-arch.adoc @@ -0,0 +1,6 @@ +[[georepl-arch]] += Geo-replication architecture for standalone {productname} + +image:178_Quay_architecture_0821_georeplication.png[Georeplication] + +In the example shown above, {productname} is running in two separate regions, with a common database and a common Redis instance. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the Quay instance, and will then be replicated, in the background, to the other storage engines. diff --git a/modules/georepl-deploy-operator.adoc b/modules/georepl-deploy-operator.adoc index befacc51f..6427131e4 100644 --- a/modules/georepl-deploy-operator.adoc +++ b/modules/georepl-deploy-operator.adoc @@ -54,11 +54,13 @@ Ideally one object storage bucket will be close to the 1st cluster (primary) whi The `config.yaml` file is shared between clusters, and will contain the details for the common PostgreSQL, Redis and storage backends: .config.yaml +[source,yaml] ---- +SERVER_HOSTNAME: <1> DB_CONNECTION_ARGS: autorollback: true threadlocals: true -DB_URI: postgresql://postgres:password@10.19.0.1:5432/quay <1> +DB_URI: postgresql://postgres:password@10.19.0.1:5432/quay <2> BUILDLOGS_REDIS: host: 10.19.0.2 port: 6379 @@ -84,8 +86,10 @@ DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: DISTRIBUTED_STORAGE_PREFERENCE: - usstorage - eustorage +FEATURE_STORAGE_REPLICATION: true ---- -<1> The PostgreSQL DB_URI must also be included in the Clair configuration file. For more information about retrieving the Clair configuration file on OpenShift, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Retrieving the Clair config]. +<1> A proper `SERVER_HOSTNAME` must be used for the route and must match the hostname of the global load balancer. +<2> To retrieve the configuration file for a Clair instance deployed using the OpenShift Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Retrieving the Clair config]. Create the `configBundleSecret`: @@ -96,6 +100,11 @@ $ oc create secret generic --from-file config.yaml=./config.yaml georep-config-b In each of the clusters, set the `configBundleSecret` and use the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environmental variable override to configure the appropriate storage for that cluster: +[NOTE] +==== +The `config.yaml` file between both deployments must match. If making a change to one cluster, it must also be changed in the other. 
+====
+
.US cluster
----
apiVersion: quay.redhat.com/v1
kind: QuayRegistry
metadata:
@@ -106,6 +115,12 @@ metadata:
spec:
  configBundleSecret: georep-config-bundle
  components:
+    - kind: objectstorage
+      managed: false
+    - kind: route
+      managed: true
+    - kind: tls
+      managed: false
    - kind: postgres
      managed: false
    - kind: clairpostgres
@@ -116,9 +131,20 @@ spec:
      managed: true
      overrides:
        env:
-        - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE
-          value: usstorage
+        - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE
+          value: usstorage
+    - kind: mirror
+      managed: true
+      overrides:
+        env:
+        - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE
+          value: usstorage
----
++
+[NOTE]
+====
+Because TLS is unmanaged, and the route is managed, you must supply the certificates either with the config tool or directly in the config bundle. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes].
+====

.European cluster
----
apiVersion: quay.redhat.com/v1
kind: QuayRegistry
metadata:
@@ -130,6 +156,12 @@ metadata:
spec:
  configBundleSecret: georep-config-bundle
  components:
+    - kind: objectstorage
+      managed: false
+    - kind: route
+      managed: true
+    - kind: tls
+      managed: false
    - kind: postgres
      managed: false
    - kind: clairpostgres
@@ -140,6 +172,17 @@ spec:
      managed: true
      overrides:
        env:
-        - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE
-          value: eustorage
+        - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE
+          value: eustorage
+    - kind: mirror
+      managed: true
+      overrides:
+        env:
+        - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE
+          value: eustorage
----
++
+[NOTE]
+====
+Because TLS is unmanaged, and the route is managed, you must supply the certificates either with the config tool or directly in the config bundle. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes].
+====
diff --git a/modules/georepl-deploy-standalone.adoc b/modules/georepl-deploy-standalone.adoc
index 410995add..39b569a24 100644
--- a/modules/georepl-deploy-standalone.adoc
+++ b/modules/georepl-deploy-standalone.adoc
@@ -11,13 +11,13 @@
For example, for a machine running in Europe with the config directory on the host available from `$QUAY/config`:
+
[subs="verbatim,attributes"]
-....
+----
$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \
   --name=quay \
   -v $QUAY/config:/conf/stack:Z \
   -e QUAY_DISTRIBUTED_STORAGE_PREFERENCE=europestorage \
   {productrepo}/{quayimage}:{productminv}
-....
+----
+
[NOTE]
====
diff --git a/modules/georepl-intro.adoc b/modules/georepl-intro.adoc
index 202dcbe71..4a7986a3c 100644
--- a/modules/georepl-intro.adoc
+++ b/modules/georepl-intro.adoc
@@ -1,14 +1,20 @@
-[[georepl-intro]]
+:_content-type: CONCEPT
+[id="georepl-intro"]
= Geo-replication
-Geo-replication allows multiple, geographically distributed Quay deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed Quay setup. Image data is asynchronously replicated in the background with transparent failover / redirect for clients.
+Geo-replication allows multiple, geographically distributed {productname} deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed {productname} setup.
Image data is asynchronously replicated in the background with transparent failover and redirect for clients.
-With {productname} 3.7, deployments of {productname} with geo-replication is supported by standalone and Operator deployments.
+With {productname} 3.7, geo-replication is supported for both standalone and Operator deployments.
+[id="arch-georpl-features"]
== Geo-replication features
-* When geo-replication is configured, container image pushes will be written to the preferred storage engine for that Red Hat Quay instance (typically the nearest storage backend within the region).
+* When geo-replication is configured, container image pushes will be written to the preferred storage engine for that {productname} instance. This is typically the nearest storage backend within the region.
+
* After the initial push, image data will be replicated in the background to other storage engines.
+
* The list of replication locations is configurable and those can be different storage backends.
+
* An image pull will always use the closest available storage engine, to maximize pull performance.
-* If replication hasn’t been completed yet, the pull will use the source storage backend instead.
+
+* If replication has not been completed yet, the pull will use the source storage backend instead.
\ No newline at end of file
diff --git a/modules/georepl-mixed-storage.adoc b/modules/georepl-mixed-storage.adoc
index b6e35b039..4bc972898 100644
--- a/modules/georepl-mixed-storage.adoc
+++ b/modules/georepl-mixed-storage.adoc
@@ -1,12 +1,12 @@
-[[georepl-mixed-storage]]
+:_content-type: CONCEPT
+[id="georepl-mixed-storage"]
= Mixed storage for geo-replication
-Quay geo-replication supports the use of different, and multiple, replication targets for example, using AWS S3 storage on public cloud and using Ceph storage on-prem.
-This complicates the key requirement of granting access to all storage backends from all Quay pods and cluster nodes. As a result, it is recommended that you:
+{productname} geo-replication supports the use of different and multiple replication targets, for example, using AWS S3 storage on public cloud and using Ceph storage on premise. This complicates the key requirement of granting access to all storage backends from all {productname} pods and cluster nodes. As a result, it is recommended that you use the following:
-* Use a VPN to prevent visibility of the internal storage _or_
-* Use a token pair that only allows access to the specified bucket used by Quay
+* A VPN to prevent visibility of the internal storage, _or_
+* A token pair that only allows access to the specified bucket used by {productname}
-This will result in the public cloud instance of Quay having access to on-prem storage but the network will be encrypted, protected, and will use ACLs, thereby meeting security requirements.
+This will result in the public cloud instance of {productname} having access to on premise storage, but the network will be encrypted, protected, and will use ACLs, thereby meeting security requirements.
-If you cannot implement these security measures, it may be preferable to deploy two distinct Quay registries and to use repository mirroring as an alternative to geo-replication.
+If you cannot implement these security measures, it may be preferable to deploy two distinct {productname} registries and to use repository mirroring as an alternative to geo-replication.
\ No newline at end of file
diff --git a/modules/georepl-prereqs.adoc b/modules/georepl-prereqs.adoc
index dd2d9d9fd..efc9bf622 100644
--- a/modules/georepl-prereqs.adoc
+++ b/modules/georepl-prereqs.adoc
@@ -1,15 +1,37 @@
-[[georepl-prereqs]]
+:_content-type: CONCEPT
+[id="arch-georepl-prereqs"]
= Geo-replication requirements and constraints
-* A single database, and therefore all metadata and Quay configuration, is shared across all regions.
-* A single Redis cache is shared across the entire Quay setup and needs to accessible by all Quay pods.
+* In geo-replicated setups, {productname} requires that all regions are able to read and write to all other regions' object storage. Object storage must be geographically accessible by all other regions.
+
+* If the object storage system of one geo-replicated site fails, that site's {productname} deployment must be shut down so that clients are redirected by a global load balancer to the remaining site with intact storage systems. Otherwise, clients will experience pull and push failures.
+
+* {productname} has no internal awareness of the health or availability of the connected object storage system. If the object storage system of one site becomes unavailable, there will be no automatic redirect to the remaining storage system, or systems, of the remaining site, or sites.
+
+* Geo-replication is asynchronous. The permanent loss of a site incurs the loss of the data that has been saved in that site's object storage system but has not yet been replicated to the remaining sites at the time of failure.
+
+* A single database, and therefore all metadata and {productname} configuration, is shared across all regions.
++
+Geo-replication does not replicate the database. In the event of an outage, {productname} with geo-replication enabled will not fail over to another database.
+
+* A single Redis cache is shared across the entire {productname} setup and needs to be accessible by all {productname} pods.
+
* The exact same configuration should be used across all regions, with exception of the storage backend, which can be configured explicitly using the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable.
-* Geo-Replication requires object storage in each region. It does not work with local storage or NFS.
-* Each region must be able to access every storage engine in each region (requires a network path).
+
+* Geo-replication requires object storage in each region. It does not work with local storage.
+
+* Each region must be able to access every storage engine in each region, which requires a network path.
+
* Alternatively, the storage proxy option can be used.
-* The entire storage backend (all blobs) is replicated. This is in contrast to repository mirroring, which can be limited to an organization or repository or image.
-* All Quay instances must share the same entrypoint, typically via load balancer.
-* All Quay instances must have the same set of superusers, as they are defined inside the common configuration file.
-* Geo-Replication requires SSL/TSL certificates and keys. For more information, see link:https://dxp-docs.ext.us-west.aws.prod.paas.redhat.com/documentation/en-us/red_hat_quay/3.6/html-single/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/index#using_ssl_to_protect_connections_to_red_hat_quay[Using SSL to protect connections to Red Hat Quay].
-If the above requirements cannot be met, you should instead use two or more distinct Quay deployments and take advantage of repository mirroring functionality.
+* The entire storage backend, for example, all blobs, is replicated. Repository mirroring, by contrast, can be limited to an organization, repository, or image.
+
+* All {productname} instances must share the same entrypoint, typically through a load balancer.
+
+* All {productname} instances must have the same set of superusers, as they are defined inside the common configuration file.
+
+* Geo-replication requires your Clair configuration to be set to `unmanaged`. An unmanaged Clair database allows the {productname} Operator to work in a geo-replicated environment, where multiple instances of the {productname} Operator must communicate with the same database. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#clair-unmanaged[Advanced Clair configuration].
+
+* Geo-replication requires SSL/TLS certificates and keys. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/index#using_ssl_to_protect_connections_to_red_hat_quay[Using SSL/TLS to protect connections to {productname}].
+
+If the above requirements cannot be met, you should instead use two or more distinct {productname} deployments and take advantage of repository mirroring functionality.
\ No newline at end of file
diff --git a/modules/helm-oci-prereqs.adoc b/modules/helm-oci-prereqs.adoc
index c04f6e8fe..a82c52692 100644
--- a/modules/helm-oci-prereqs.adoc
+++ b/modules/helm-oci-prereqs.adoc
@@ -8,13 +8,13 @@
$ sudo cp rootCA.pem /etc/pki/ca-trust/source/anchors/
$ sudo update-ca-trust extract
----
-* **Experimental feature:** Many of the commands for interacting with Helm and OCI registries make use of the `helm chart` subcommand. At the time of writing, OCI support in Helm is still marked as an “experimental” feature and must be enabled explicitly. This is accomplished by setting the environment variable `HELM_EXPERIMENTAL_OCI=1`.
+* **Generally available:** As of Helm 3.8, OCI registry support for charts is now generally available.
-* **Install Helm client:** Download your desired version from link:https://github.com/helm/helm/releases[], for example, link:https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz[]. Unpack it and move the helm binary to its desired destination:
+* **Install Helm client:** Download your desired version from the link:https://github.com/helm/helm/releases[Helm releases] page. Unpack it and move the helm binary to its desired destination:
+
----
-$ tar -zxvf helm-v3.5.3-linux-amd64.tar.gz
+$ tar -zxvf helm-v3.8.2-linux-amd64.tar.gz
$ mv linux-amd64/helm /usr/local/bin/helm
----
-* **Create organization in Quay:** Create a new organization for storing the Helm charts, using the Quay registry UI. For example, create an organization named `helm`.
\ No newline at end of file
+* **Create organization in Quay:** Create a new organization for storing the Helm charts, using the Quay registry UI. For example, create an organization named `helm`.
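+
+To confirm that the client is installed correctly, you can print its version before continuing. This is a minimal sanity check; the exact output depends on the release you downloaded:
+
+----
+$ helm version
+----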
diff --git a/modules/helm-oci-quay.adoc b/modules/helm-oci-quay.adoc index fa4094295..6eebb5fb0 100644 --- a/modules/helm-oci-quay.adoc +++ b/modules/helm-oci-quay.adoc @@ -1,97 +1,88 @@ [[helm-oci-quay]] -= Using Helm charts with Quay += Helm charts with {productname} -Helm, as a graduated project of the Cloud Native Computing Foundation (CNCF), has become the de facto package manager for Kubernetes as it simplifies how applications are packaged and deployed. Helm uses a packaging format called Charts which contain the Kubernetes resources representing an application. Charts can be made available for general distribution and consumption in repositories. A Helm repository is an HTTP server that serves an index.yaml metadata file and optionally a set of packaged charts. Beginning with Helm version 3, support was made available for distributing charts in OCI registries as an alternative to a traditional repository. To demonstrate how Quay can be used as a registry for Helm charts, an existing chart from a Helm repository will be used to showcase the interaction with OCI registries for chart developers and users. +Helm, as a graduated project of the Cloud Native Computing Foundation (CNCF), has become the de facto package manager for Kubernetes as it simplifies how applications are packaged and deployed. Helm uses a packaging format called Charts which contain the Kubernetes resources representing an application. Charts can be made available for general distribution and consumption in repositories. A Helm repository is an HTTP server that serves an `index.yaml` metadata file and optionally a set of packaged charts. Beginning with Helm version 3, support was made available for distributing charts in OCI registries as an alternative to a traditional repository. +== Using Helm charts with {productname} -In the following example, a sample etherpad chart is downloaded from from the Red Hat Community of Practice (CoP) repository and pushed to a local {productname} repository using the following steps: +Use the following example to download and push an etherpad chart from the Red Hat Community of Practice (CoP) repository. -* Add the appropriate repository -* Update the repository with the latest metadata -* Download and untar the chart to create a local directory called `etherpad` - -For example: +.Procedure +. Add a chart repository: ++ +[source,terminal] ---- $ helm repo add redhat-cop https://redhat-cop.github.io/helm-charts -$ helm repo update -$ helm pull redhat-cop/etherpad --version=0.0.4 --untar ---- -Tagging the chart requires use of the `helm chart save` command - this corresponds to using `podman tag` for tagging images. - +. Update the information of available charts locally from the chart repository: ++ +[source,terminal] ---- -$ helm chart save ./etherpad example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 - -ref: example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 -digest: 6850d9b21dd4b87cf20ad49f2e2c7def9655c52ea573e1ddb9d1464eeb6a46a6 -size: 3.5 KiB -name: etherpad -version: 0.0.4 -0.0.4: saved +$ helm repo update ---- - -Use the `helm chart list` command to see the local instance of the chart: - +. 
Download a chart from a repository: ++ +[source,terminal] ---- -helm chart list - -REF NAME VERSION DIGEST SIZE CREATED -example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 etherpad 0.0.4 ce0233f 3.5 KiB 23 seconds +$ helm pull redhat-cop/etherpad --version=0.0.4 --untar ---- - -Before pushing the chart, log in to the repository using the `helm registry login` command: - +. Package the chart into a chart archive: ++ +[source,terminal] ---- -$ helm registry login example-registry-quay-quay-enterprise.apps.user1.example.com -Username: quayadmin -Password: -Login succeeded +$ helm package ./etherpad ---- - - -Push the chart to your local Quay repository using the `helm chart push` command: - ++ +Example output ++ +[source,terminal] ---- -$ helm chart push example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 - -The push refers to repository [example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad] -ref: example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 -digest: ce0233fd014992b8e27cc648cdabbebd4dd6850aca8fb8e50f7eef6f2f49833d -size: 3.5 KiB -name: etherpad -version: 0.0.4 -0.0.4: pushed to remote (1 layer, 3.5 KiB total) +Successfully packaged chart and saved it to: /home/user/linux-amd64/etherpad-0.0.4.tgz ---- -To test that the push worked, delete the local copy and then pull the chart from the repository: - +. Log in to your Quay repository using `helm registry login`: ++ +[source,terminal] ---- -$ helm chart rm example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 -$ rm -rf etherpad -$ helm chart pull example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 - -0.0.4: Pulling from example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad -ref: example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 -digest: 6850d9b21dd4b87cf20ad49f2e2c7def9655c52ea573e1ddb9d1464eeb6a46a6 -size: 3.5 KiB -name: etherpad -version: 0.0.4 -Status: Downloaded newer chart for example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 +$ helm registry login quay370.apps.quayperf370.perfscale.devcluster.openshift.com ---- -Use the `helm chart export` command to extract the chart files: - +. Push the chart to your Quay repository using the `helm push` command: ++ +[source,terminal] +---- +$ helm push etherpad-0.0.4.tgz oci://quay370.apps.quayperf370.perfscale.devcluster.openshift.com +---- ++ +Example output: ++ +[source,terminal] +---- +Pushed: quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad:0.0.4 +Digest: sha256:a6667ff2a0e2bd7aa4813db9ac854b5124ff1c458d170b70c2d2375325f2451b ---- -$ helm chart export example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 -ref: example-registry-quay-quay-enterprise.apps.user1.example.com/helm/etherpad:0.0.4 -digest: ce0233fd014992b8e27cc648cdabbebd4dd6850aca8fb8e50f7eef6f2f49833d -size: 3.5 KiB -name: etherpad -version: 0.0.4 -Exported chart to etherpad/ +. 
Ensure that the push worked by deleting the local copy, and then pulling the chart from the repository:
++
+[source,terminal]
+----
+$ rm -rf etherpad-0.0.4.tgz
+----
++
+[source,terminal]
+----
+$ helm pull oci://quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad --version 0.0.4
+----
++
+Example output:
++
+[source,terminal]
+----
+Pulled: quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad:0.0.4
+Digest: sha256:4f627399685880daf30cf77b6026dc129034d68c7676c7e07020b70cf7130902
+----
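+
+After the pull succeeds, you can install the chart directly from the registry. The following command is a sketch: the release name `my-etherpad` is illustrative, and Helm 3.8 or later is assumed.
+
+[source,terminal]
+----
+$ helm install my-etherpad oci://quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad --version 0.0.4
+----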
diff --git a/modules/internal-api.adoc b/modules/internal-api.adoc
new file mode 100644
index 000000000..1af8ee816
--- /dev/null
+++ b/modules/internal-api.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="internal-api"]
+= Internal API endpoints for Clair
+
+Internal API endpoints are embedded in `/api/v1/internal` and are meant for communication between Clair microservices.
+
+[IMPORTANT]
+====
+* If your Clair `config.yaml` file is set to `CLAIR_MODE=combo`, internal API endpoints might not exist.
+* APIs are not formally exposed in Clair's OpenAPI Specification. Further information and usage are dependent on the reader.
+====
+
+[id="update-diffs"]
+== Update diffs
+
+The `update_diff` endpoint exposes the API for diffing two update operations. This parameter is used by the notifier to determine the added and removed vulnerabilities on security database updates.
+
+[id="update-operations"]
+== Update operation
+
+The `update_operation` endpoint exposes the API for viewing updater activity. This is used by the notifier to determine if new updates have occurred, and triggers an update diff to see what has changed.
+
+[id="affected-manifest"]
+== AffectedManifest
+
+The `affected_manifest` endpoint exposes the API for retrieving affected manifests given a list of vulnerabilities. This is used by the notifier to determine the manifests that need to have a notification generated.
\ No newline at end of file
diff --git a/modules/managed-clair-database.adoc b/modules/managed-clair-database.adoc
new file mode 100644
index 000000000..c75ec006c
--- /dev/null
+++ b/modules/managed-clair-database.adoc
@@ -0,0 +1,28 @@
+:_content-type: PROCEDURE
+[id="managed-clair-database"]
+= Setting a Clair database to managed
+
+Use the following procedure to set your Clair database to managed.
+
+.Procedure
+
+* In the Quay Operator, set the `clairpostgres` component of the `QuayRegistry` custom resource to `managed: true`:
++
+[source,yaml]
+----
+apiVersion: quay.redhat.com/v1
+kind: QuayRegistry
+metadata:
+  name: quay370
+spec:
+  configBundleSecret: config-bundle-secret
+  components:
+    - kind: objectstorage
+      managed: false
+    - kind: route
+      managed: true
+    - kind: tls
+      managed: false
+    - kind: clairpostgres
+      managed: true
+----
\ No newline at end of file
diff --git a/modules/mapping-repositories-to-cpe-information.adoc b/modules/mapping-repositories-to-cpe-information.adoc
new file mode 100644
index 000000000..86695ec8d
--- /dev/null
+++ b/modules/mapping-repositories-to-cpe-information.adoc
@@ -0,0 +1,44 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: CONCEPT
+[id="mapping-repositories-to-cpe-information"]
+= Mapping repositories to Common Product Enumeration information
+
+Clair's {rhel} scanner relies on a Common Product Enumeration (CPE) file to map RPM packages to the corresponding security data to produce matching results. These files are owned by product security and updated daily.
+
+The CPE file must be present, or access to the file must be allowed, for the scanner to properly process RPM packages. If the file is not present, RPM packages installed in the container image will not be scanned.
+
+.Clair CPE mapping files
+[options="header"]
+|===
+|CPE | Link to JSON mapping file
+| `repos2cpe` | link:https://www.redhat.com/security/data/metrics/repository-to-cpe.json[Red Hat Repository-to-CPE JSON]
+| `names2repos` | link:https://access.redhat.com/security/data/metrics/container-name-repos-map.json[Red Hat Name-to-Repos JSON]
+|===
+
+In addition to uploading CVE information to the database for disconnected Clair installations, you must also make the mapping file available locally:
+
+* For standalone {productname} and Clair deployments, the mapping file must be loaded into the Clair pod.
+
+* For {productname} Operator deployments on {ocp} and Clair deployments, you must set the Clair component to `unmanaged`. Then, Clair must be deployed manually, setting the configuration to load a local copy of the mapping file.
+
+[id="mapping-repositories-to-cpe-configuration"]
+== Mapping repositories to Common Product Enumeration example configuration
+
+Use the `repo2cpe_mapping_file` and `name2repos_mapping_file` fields in your Clair configuration to include the CPE JSON mapping files. For example:
+
+[source,yaml]
+----
+indexer:
+  scanner:
+    repo:
+      rhel-repository-scanner:
+        repo2cpe_mapping_file: /data/cpe-map.json
+    package:
+      rhel_containerscanner:
+        name2repos_mapping_file: /data/repo-map.json
+----
+
+For more information, see link:https://www.redhat.com/en/blog/how-accurately-match-oval-security-data-installed-rpms[How to accurately match OVAL security data to installed RPMs].
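+
+For a disconnected deployment, you might first download local copies of the mapping files listed above so that Clair can load them from disk. The following commands are a sketch; the files must end up at the paths given in your Clair configuration:
+
+[source,terminal]
+----
+$ wget https://www.redhat.com/security/data/metrics/repository-to-cpe.json
+$ wget https://access.redhat.com/security/data/metrics/container-name-repos-map.json
+----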
\ No newline at end of file diff --git a/modules/metrics-multipart-uploads.adoc b/modules/metrics-multipart-uploads.adoc index 8f2160369..d8933f1f1 100644 --- a/modules/metrics-multipart-uploads.adoc +++ b/modules/metrics-multipart-uploads.adoc @@ -8,8 +8,8 @@ The multipart uploads metrics show the number of blobs uploads to storage (S3, R [options="header"] |=== | Metric name | Description -| quay_multipart_uploads_started_total | Number of multipart uploads to Quay storage that completed -| quay_multipart_uploads_completed_total | Number of multipart uploads to Quay storage that started +| quay_multipart_uploads_started_total | Number of multipart uploads to Quay storage that started +| quay_multipart_uploads_completed_total | Number of multipart uploads to Quay storage that completed |=== diff --git a/modules/mirroring-api-intro.adoc b/modules/mirroring-api-intro.adoc index 453319df8..6c9200be1 100644 --- a/modules/mirroring-api-intro.adoc +++ b/modules/mirroring-api-intro.adoc @@ -1,8 +1,10 @@ -[[mirroring-api-intro]] +:_content-type: CONCEPT +[id="arch-mirroring-api-intro"] = Mirroring API -You can use the Quay API to configure repository mirroring: +You can use the {productname} API to configure repository mirroring: +.Mirroring API image:swagger-mirroring.png[Mirroring API] More information is available in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_api_guide/index[{productname} API Guide] diff --git a/modules/mirroring-creating-repo.adoc b/modules/mirroring-creating-repo.adoc index dcf9759a1..734e73113 100644 --- a/modules/mirroring-creating-repo.adoc +++ b/modules/mirroring-creating-repo.adoc @@ -1,15 +1,23 @@ -[[mirroring-creating-repo]] +:_content-type: PROCEDURE +[id="mirroring-creating-repo"] = Creating a mirrored repository -The steps shown in this section assume you already have enabled repository mirroring in the configuration for your {productname} cluster and that you have a deployed a mirroring worker. - -When mirroring a repository from an external container registry, create a new private repository. Typically the same name is used as the target repository, for example, `quay-rhel8`: +When mirroring a repository from an external container registry, you must create a new private repository. Typically, the same name is used as the target repository, for example, `quay-rhel8`. image:repo_quay_rhel8.png[Create new {productname} repo] - +[id="mirroring-repository-mirroring-settings"] == Repository mirroring settings +Use the following procedure to adjust the settings of your mirrored repository. + +.Prerequisites + +* You have enabled repository mirroring in your {productname} configuration file. +* You have deployed a mirroring worker. + +.Procedure + . In the Settings tab, set the Repository State to `Mirror`: + image:repo_mirror_create.png[Create a new {productname} repo mirror] @@ -21,16 +29,8 @@ image:repo-mirror-details-start.png[Repository mirroring] . Enter the details as required in the following fields: + * **Registry Location:** The external repository you want to mirror, for example, `registry.redhat.io/quay/quay-rhel8` -* **Tags:** This field is required. You may enter a comma-separated list of individual tags or -tag patterns. (See _Tag Patterns_ section for details.) 
-+
-[NOTE]
-====
-In order for Quay to get the list of tags in the remote repository, one of the following requirements must be met:
-
-* An image with the "latest" tag must exist in the remote repository _OR_
-* At least one explicit tag, without pattern matching, must exist in the list of tags that you specify
-====
+* **Tags:** This field is required. You may enter a comma-separated list of individual tags or tag patterns. (See _Tag Patterns_ section for details.)
+
* **Start Date:** The date on which mirroring begins. The current date and time is used by default.
* **Sync Interval:** Defaults to syncing every 24 hours. You can change that based on hours or days.
* **Robot User:** Create a new robot account or choose an existing robot account to do the mirroring.
@@ -38,17 +38,25 @@ * **Password:** The password associated with the Username. Note that the password cannot include characters that require an escape character (\).
+[id="mirroring-advanced-settings"]
== Advanced settings
-* In the Advanced Settings section, configure TLS and proxy, if required:
+In the *Advanced Settings* section, you can configure SSL/TLS and proxy with the following options:
+
+* **Verify TLS:** Select this option if you want to require HTTPS and to verify certificates when communicating with the target remote registry.
+* **Accept Unsigned Images:** Selecting this option allows unsigned images to be mirrored.
+* **HTTP Proxy:** Identify the HTTP proxy server needed to access the remote site, if a proxy server is needed.
+* **HTTPS Proxy:** Identify the HTTPS proxy server needed to access the remote site, if a proxy server is needed.
+* **No Proxy:** List of locations that do not require proxy.
-* **Verify TLS:** Check this box if you want to require HTTPS and to verify certificates, when communicating with the target remote registry.
-* **HTTP Proxy:** Identify the HTTP proxy server needed to access the remote site, if one is required.
-* **HTTPS Proxy:** Identify the HTTPS proxy server needed to access the remote site, if one is required.
-* **No Proxy:** List of locations that do not require proxy
+[id="mirroring-synchronize-now"]
== Synchronize now
+Use the following procedure to initiate the mirroring operation.
+
+.Procedure
+
* To perform an immediate mirroring operation, press the Sync Now button on the repository's Mirroring tab.
The logs are available on the Usage Logs tab: + image:repo-mirror-usage-logs.png[Usage logs] @@ -57,6 +65,6 @@ When the mirroring is complete, the images will appear in the Tags tab: + image:repo-mirror-tags.png[Repository mirroring tags] + -Below is an example of a completed Repository Mirroring screen: +Below is an example of a completed Repository Mirroring screen: + image:repo-mirror-details.png[Repository mirroring details] diff --git a/modules/mirroring-events.adoc b/modules/mirroring-events.adoc index 133a79b51..895aee025 100644 --- a/modules/mirroring-events.adoc +++ b/modules/mirroring-events.adoc @@ -1,4 +1,5 @@ -[[mirroring-events]] +:_content-type: CONCEPT +[id="arch-mirroring-events"] = Event notifications for mirroring There are three notification events for repository mirroring: @@ -7,4 +8,4 @@ There are three notification events for repository mirroring: * Repository Mirror Success * Repository Mirror Unsuccessful -The events can be configured inside the Settings tab for each repository, and all existing notification methods such as email, slack, Quay UI and webhooks are supported. \ No newline at end of file +The events can be configured inside of the *Settings* tab for each repository, and all existing notification methods such as email, Slack, Quay UI, and webhooks are supported. \ No newline at end of file diff --git a/modules/mirroring-intro.adoc b/modules/mirroring-intro.adoc index c95e61bc7..8ba7b3466 100644 --- a/modules/mirroring-intro.adoc +++ b/modules/mirroring-intro.adoc @@ -1,24 +1,21 @@ -[[mirroring-intro]] +:_content-type: CONCEPT +[id="arch-mirroring-intro"] = Repository mirroring -{productname} repository mirroring lets you mirror images from external container registries -(or another local registry) into your {productname} cluster. -Using repository mirroring, you can synchronize images to {productname} based on repository names and tags. +{productname} repository mirroring lets you mirror images from external container registries, or another local registry, into your {productname} cluster. Using repository mirroring, you can synchronize images to {productname} based on repository names and tags. -From your {productname} cluster with repository mirroring enabled, you can: +From your {productname} cluster with repository mirroring enabled, you can perform the following: * Choose a repository from an external registry to mirror -* Add credentials to access the external registry +* Add credentials to access the external registry * Identify specific container image repository names and tags to sync * Set intervals at which a repository is synced * Check the current state of synchronization +To use the mirroring functionality, you need to perform the following actions: -To use the mirroring functionality, you need to: - -* Enable Repository Mirroring in the {productname} configuration +* Enable repository mirroring in the {productname} configuration file * Run a repository mirroring worker * Create mirrored repositories -All repository mirroring configuration can be performed using the configuration tool UI or via the Quay API - +All repository mirroring configurations can be performed using the configuration tool UI or by the {productname} API. 
\ No newline at end of file
diff --git a/modules/mirroring-recommend.adoc b/modules/mirroring-recommend.adoc
index 13bf07013..164f4c4fb 100644
--- a/modules/mirroring-recommend.adoc
+++ b/modules/mirroring-recommend.adoc
@@ -1,11 +1,19 @@
-[[mirroring-recommend]]
+:_content-type: CONCEPT
+[id="arch-mirroring-recommend"]
= Repository mirroring recommendations
-* Repository mirroring pods can run on any node including other nodes where Quay is already running
-* Repository mirroring is scheduled in the database and run in batches. As a result, more workers could mean faster mirroring, since more batches will be processed.
-* The optimal number of mirroring pods depends on:
+Best practices for repository mirroring include the following:
+
+* Repository mirroring pods can run on any node. This means that you can run mirroring on nodes where {productname} is already running.
+
+* Repository mirroring is scheduled in the database and runs in batches. As a result, repository workers check each repository mirror configuration and read when the next synchronization is due. More mirror workers mean that more repositories can be mirrored at the same time. For example, running 10 mirror workers means that a user can run 10 mirroring operations in parallel. If a user only has 2 workers with 10 mirror configurations, only 2 operations can be performed.
+
+* The optimal number of mirroring pods depends on the following conditions:
+
** The total number of repositories to be mirrored
-** The number of images and tags in the repositories and the frequency of changes
-** Parallel batches
-* You should balance your mirroring schedule across all mirrored repositories, so that they do not all start up at the same time.
-* For a mid-size deployment, with approximately 1000 users and 1000 repositories, and with roughly 100 mirrored repositories, it is expected that you would use 3-5 mirroring pods, scaling up to 10 if required.
+** The number of images and tags in the repositories and the frequency of changes
+** Parallel batching
++
+For example, if a user is mirroring a repository that has 100 tags, the mirror will be completed by one worker. Users must consider how many repositories they want to mirror in parallel, and base the number of workers on that.
++
+Multiple tags in the same repository cannot be mirrored in parallel.
\ No newline at end of file
diff --git a/modules/mirroring-tag-patterns.adoc b/modules/mirroring-tag-patterns.adoc
index 4140d6cf5..6c980c939 100644
--- a/modules/mirroring-tag-patterns.adoc
+++ b/modules/mirroring-tag-patterns.adoc
@@ -1,10 +1,7 @@
[[mirroring-tag-patterns]]
= Mirroring tag patterns
-As noted above, at least one Tag must be explicitly entered (ie. not a tag pattern) _or_ the tag
-"latest" must exist in the report repository. (The tag "latest" will not be synced unless
-specified in the tag list.). This is required for Quay to get the list of tags
-in the remote repository to compare to the specified list to mirror.
+At least one tag must be entered. The following table references possible image tag patterns.
== Pattern syntax
diff --git a/modules/mirroring-using.adoc b/modules/mirroring-using.adoc
index 1653421e9..048a3f010 100644
--- a/modules/mirroring-using.adoc
+++ b/modules/mirroring-using.adoc
@@ -1,26 +1,22 @@
-[[mirroring-using]]
+:_content-type: CONCEPT
+[id="arch-mirroring-using"]
= Using repository mirroring
-Here are some features and limitations of {productname} repository mirroring:
+The following list shows features and limitations of {productname} repository mirroring:
-* With repository mirroring, you can mirror an entire repository or selectively
-limit which images are synced. Filters can be based on a comma-separated list of tags, a
-range of tags, or other means of identifying tags through
-regular expressions.
+* With repository mirroring, you can mirror an entire repository or selectively limit which images are synced. Filters can be based on a comma-separated list of tags, a range of tags, or other means of identifying tags through Unix shell-style wildcards. For more information, see the documentation for link:https://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm[wildcards].
-* Once a repository is set as mirrored, you cannot manually add other images to that repository.
+* When a repository is set as mirrored, you cannot manually add other images to that repository.
-* Because the mirrored repository is based on the repository and tags you set,
-it will hold only the content represented by the repo/tag pair. In other words, if you change
+* Because the mirrored repository is based on the repository and tags you set, it will hold only the content represented by the repository and tag pair. For example, if you change
the tag so that some images in the repository no longer match, those images will be deleted.
-* Only the designated robot can push images to a mirrored repository,
-superseding any role-based access control permissions set on the repository.
+* Only the designated robot can push images to a mirrored repository, superseding any role-based access control permissions set on the repository.
-* With a mirrored repository, a user can pull images (given read permission)
-from the repository but not push images to the repository.
+* Mirroring can be configured to roll back on failure, _or_ to run on a best-effort basis.
-* Changing settings on your mirrored repository is done from the Mirrors tab
-on the Repositories page for the mirrored repository you create.
+* With a mirrored repository, a user with _read_ permissions can pull images from the repository but cannot push images to the repository.
+
+* Changing settings on your mirrored repository can be performed in the {productname} user interface, using the *Repositories* -> *Mirrors* tab for the mirrored repository you create.
* Images are synced at set intervals, but can also be synced on demand.
\ No newline at end of file
diff --git a/modules/mirroring-versus-georepl.adoc b/modules/mirroring-versus-georepl.adoc
index 631c1edda..3a8883003 100644
--- a/modules/mirroring-versus-georepl.adoc
+++ b/modules/mirroring-versus-georepl.adoc
@@ -1,31 +1,36 @@
-[[mirroring-versus-georepl]]
-= Repository mirroring versus geo-replication
+:_content-type: CONCEPT
+[id="mirroring-versus-georepl"]
+= Repository mirroring compared to geo-replication
-Quay geo-replication mirrors the entire image storage backend data between 2 or more different storage backends while the database is shared (one Quay registry with two different blob storage endpoints).
The primary use cases for geo-replication are:
+{productname} geo-replication mirrors the entire image storage backend data between 2 or more different storage backends while the database is shared, for example, one {productname} registry with two different blob storage endpoints. The primary use cases for geo-replication include the following:
* Speeding up access to the binary blobs for geographically dispersed setups
+
* Guaranteeing that the image content is the same across regions
-Repository mirroring synchronizes selected repositories (or subsets of repositories) from one registry to another. The registries are distinct, with registry is separate database and image storage. The primary use cases for mirroring are:
+Repository mirroring synchronizes selected repositories, or subsets of repositories, from one registry to another. The registries are distinct, with each registry having a separate database and separate image storage.
+
+The primary use cases for mirroring include the following:
-* Independent registry deployments in different datacenters or regions, where a certain subset of the overall content is supposed to be shared across the datacenters / regions
-* Automatic synchronization or mirroring of selected (whitelisted) upstream repositories from external registries into a local Quay deployment
+* Independent registry deployments in different data centers or regions, where a certain subset of the overall content is supposed to be shared across the data centers and regions
+* Automatic synchronization or mirroring of selected (allowlisted) upstream repositories from external registries into a local {productname} deployment
[NOTE]
====
Repository mirroring and geo-replication can be used simultaneously.
====
-.{productname} Repository mirroring versus geo-replication
+.{productname} Repository mirroring and geo-replication comparison
[width="100%",options="header"]
|===
| Feature / Capability | Geo-replication | Repository mirroring
| What is the feature designed to do? | A shared, global registry | Distinct, different registries
-| What happens if replication or mirroring hasn’t been completed yet? | The remote copy is used (slower) | No image is served
+| What happens if replication or mirroring has not been completed yet? | The remote copy is used (slower) | No image is served
| Is access to all storage backends in both regions required? | Yes (all {productname} nodes) | No (distinct storage)
| Can users push images from both sites to the same repository? | Yes | No
-| Is all registry content and configuration identical across all regions (shared database) | Yes | No
-| Can users select individual namespaces or repositories to be mirrored? | No,by default | Yes
+| Is all registry content and configuration identical across all regions (shared database)? | Yes | No
+| Can users select individual namespaces or repositories to be mirrored? | No | Yes
| Can users apply filters to synchronization rules? | No | Yes
+| Are individual / different role-based access control configurations allowed in each region? | No | Yes
|===
diff --git a/modules/mirroring-worker.adoc b/modules/mirroring-worker.adoc
index 85505eadb..0ddf80aa1 100644
--- a/modules/mirroring-worker.adoc
+++ b/modules/mirroring-worker.adoc
@@ -1,21 +1,25 @@
-[[mirroring-worker]]
+[id="mirroring-worker"]
= Mirroring worker
-* To run the repository mirroring worker, start by running a `Quay` pod with the `repomirror` option:
+Use the following procedure to start the repository mirroring worker.
+
+.Procedure
+
+* If you have not configured TLS communications using a `/root/ca.crt` certificate, enter the following command to start a `Quay` pod with the `repomirror` option:
+
[subs="verbatim,attributes"]
-```
+----
$ sudo podman run -d --name mirroring-worker \
  -v $QUAY/config:/conf/stack:Z \
  {productrepo}/{quayimage}:{productminv} repomirror
-```
+----
-* If you have configured TLS communications using a certificate `/root/ca.crt`, then the following example shows how to start the mirroring worker:
+* If you have configured TLS communications using a `/root/ca.crt` certificate, enter the following command to start the repository mirroring worker:
+
[subs="verbatim,attributes"]
-```
+----
$ sudo podman run -d --name mirroring-worker \
  -v $QUAY/config:/conf/stack:Z \
-  -v /root/ca.crt:/etc/pki/ca-trust/source/anchors/ca.crt \
+  -v /root/ca.crt:/etc/pki/ca-trust/source/anchors/ca.crt:Z \
  {productrepo}/{quayimage}:{productminv} repomirror
-```
+----
diff --git a/modules/monitoring-single-namespace.adoc b/modules/monitoring-single-namespace.adoc
index 4df04b386..00c914d57 100644
--- a/modules/monitoring-single-namespace.adoc
+++ b/modules/monitoring-single-namespace.adoc
@@ -1,26 +1,39 @@
-[[monitoring-single-namespace]]
-= Enabling monitoring when Operator is installed in a single namespace
+:_content-type: PROCEDURE
+[id="monitoring-single-namespace"]
+= Enabling monitoring when the {productname} Operator is installed in a single namespace
-When {productname} Operator is installed in a single namespace, the monitoring component is unmanaged. To configure monitoring, you need to enable it for user-defined namespaces in OpenShift Container Platform. For more information, see the OCP documentation for link:https://docs.openshift.com/container-platform/4.7/monitoring/configuring-the-monitoring-stack.html[Configuring the monitoring stack] and link:https://docs.openshift.com/container-platform/4.7/monitoring/enabling-monitoring-for-user-defined-projects.html[Enabling monitoring for user-defined projects].
+When the {productname} Operator is installed in a single namespace, the monitoring component is set to `unmanaged`. To configure monitoring, you need to enable it for user-defined namespaces in {ocp}.
-The following steps show you how to configure monitoring for Quay, based on the OCP documentation.
+For more information, see the {ocp} documentation for link:https://docs.openshift.com/container-platform/4.7/monitoring/configuring-the-monitoring-stack.html[Configuring the monitoring stack] and link:https://docs.openshift.com/container-platform/4.7/monitoring/enabling-monitoring-for-user-defined-projects.html[Enabling monitoring for user-defined projects].
+The following sections show you how to enable monitoring for {productname} based on the {ocp} documentation.
+
+[id="creating-cluster-monitoring-config-map"]
== Creating a cluster monitoring config map
-. Check whether the `cluster-monitoring-config` ConfigMap object exists:
+Use the following procedure to check whether the `cluster-monitoring-config` `ConfigMap` object exists.
+
+.Procedure
+
+. Enter the following command to check whether the `cluster-monitoring-config` ConfigMap object exists:
+
-```
+[source,terminal]
+----
$ oc -n openshift-monitoring get configmap cluster-monitoring-config
-
+----
++
+.Example output
++
+[source,terminal]
+----
Error from server (NotFound): configmaps "cluster-monitoring-config" not found
-```
-. If the ConfigMap object does not exist:
-.. Create the following YAML manifest.
In this example, the file is called `cluster-monitoring-config.yaml`:
+. Optional: If the `ConfigMap` object does not exist, create a YAML manifest. In the following example, the file is called `cluster-monitoring-config.yaml`.
+
+[source,terminal]
----
-$ cat cluster-monitoring-config.yaml
-
+cat <<EOF > cluster-monitoring-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
@@ -28,37 +41,56 @@ metadata:
  namespace: openshift-monitoring
data:
  config.yaml: |
+EOF
----
-.. Create the ConfigMap object:
+
+. Optional: If the `ConfigMap` object does not exist, create the `ConfigMap` object:
+
+[source,terminal]
----
$ oc apply -f cluster-monitoring-config.yaml
configmap/cluster-monitoring-config created
----
+
+. Ensure that the `ConfigMap` object exists by running the following command:
+
+[source,terminal]
----
$ oc -n openshift-monitoring get configmap cluster-monitoring-config
-
+----
++
+.Example output
++
+[source,terminal]
+----
NAME                        DATA   AGE
cluster-monitoring-config   1      12s
----
+[id="creating-user-defined-workload-monitoring-config-map"]
+== Creating a user-defined workload monitoring ConfigMap object
+Use the following procedure to check whether the `user-workload-monitoring-config` `ConfigMap` object exists.
-== Creating a user-defined workload monitoring config map
+.Procedure
-. Check whether the `user-workload-monitoring-config` ConfigMap object exists:
+. Enter the following command to check whether the `user-workload-monitoring-config` `ConfigMap` object exists:
+
----
$ oc -n openshift-user-workload-monitoring get configmap user-workload-monitoring-config
-
+----
++
+.Example output
++
+[source,terminal]
+----
Error from server (NotFound): configmaps "user-workload-monitoring-config" not found
----
-. If the ConfigMap object does not exist:
-.. Create the following YAML manifest. In this example, the file is called `user-workload-monitoring-config.yaml`:
+. If the `ConfigMap` object does not exist, create a YAML manifest. In the following example, the file is called `user-workload-monitoring-config.yaml`.
+
+[source,terminal]
----
-$ cat user-workload-monitoring-config.yaml
+cat <<EOF > user-workload-monitoring-config.yaml
apiVersion: v1
kind: ConfigMap
@@ -67,35 +99,50 @@ metadata:
  namespace: openshift-user-workload-monitoring
data:
  config.yaml: |
+EOF
----
-.. Create the ConfigMap object:
+
+. Optional: Create the `ConfigMap` object by entering the following command:
+
+[source,terminal]
----
$ oc apply -f user-workload-monitoring-config.yaml
-
+----
++
+.Example output
++
+[source,terminal]
+----
configmap/user-workload-monitoring-config created
----
+[id="enabling-monitoring-user-defined-projects"]
+== Enable monitoring for user-defined projects
+Use the following procedure to enable monitoring for user-defined projects.
+.Procedure
-== Enable monitoring for user-defined projects
-
-. Check whether monitoring for user-defined projects is running:
+. Enter the following command to check if monitoring for user-defined projects is running:
+
----
$ oc get pods -n openshift-user-workload-monitoring
-
+----
++
+.Example output
++
+[source,terminal]
+----
No resources found in openshift-user-workload-monitoring namespace.
----
-. Edit the `cluster-monitoring-config` ConfigMap:
+. Edit the `cluster-monitoring-config` `ConfigMap` by entering the following command:
+
----
$ oc -n openshift-monitoring edit configmap cluster-monitoring-config
----
-
-.
Set `enableUserWorkload: true` in your `config.yaml` file to enable monitoring for user-defined projects on the cluster:
+
[source,yaml]
----
@@ -107,11 +154,17 @@ kind: ConfigMap
metadata:
  annotations:
----
-. Save the file to apply the changes and then check that the appropriate pods are running:
+
+. Enter the following command to save the file, apply the changes, and ensure that the appropriate pods are running:
+
----
$ oc get pods -n openshift-user-workload-monitoring
-
+----
++
+.Example output
++
+[source,terminal]
+----
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-operator-6f96b4b8f8-gq6rl   2/2     Running   0          15s
prometheus-user-workload-0             5/5     Running   1          12s
@@ -119,14 +172,18 @@ prometheus-user-workload-1             5/5     Running   1          12s
thanos-ruler-user-workload-0           3/3     Running   0          8s
thanos-ruler-user-workload-1           3/3     Running   0          8s
----
-
-== Create a Service object to expose Quay metrics
+[id="creating-service-object-expose-quay-metrics"]
+== Creating a Service object to expose {productname} metrics
+
+Use the following procedure to create a `Service` object to expose {productname} metrics.
+
+.Procedure

. Create a YAML file for the Service object:
+
----
-$ cat quay-service.yaml
+$ cat <<EOF > quay-service.yaml
apiVersion: v1
kind: Service
@@ -147,26 +204,34 @@ spec:
    quay-component: quay-app
    quay-operator/quayregistry: example-registry
  type: ClusterIP
+EOF
----
-
-
-. Create the Service object:
+
+. Create the `Service` object by entering the following command:
+
+[source,terminal]
----
$ oc apply -f quay-service.yaml
-
+----
++
+.Example output
++
+[source,terminal]
+----
service/example-registry-quay-metrics created
----
-== Create a ServiceMonitor object
+[id="creating-servicemonitor-object"]
+== Creating a ServiceMonitor object
-Configure OpenShift Monitoring to scrape the metrics by creating a ServiceMonitor resource.
+Use the following procedure to configure OpenShift Monitoring to scrape the metrics by creating a `ServiceMonitor` resource.
+.Procedure
-. Create a YAML file for the ServiceMonitor resource:
+. Create a YAML file for the `ServiceMonitor` resource:
+
----
-$ cat quay-service-monitor.yaml
+$ cat <<EOF > quay-service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
@@ -178,28 +243,34 @@ metadata:
spec:
  endpoints:
  - port: quay-metrics
-   namespaceSelector:
-   any: true
+  namespaceSelector:
+    any: true
  selector:
    matchLabels:
      quay-component: monitoring
+EOF
----
-. Create the ServiceMonitor:
+. Create the `ServiceMonitor` resource by entering the following command:
+
----
-$ oc apply -f quay-service-monitor.yaml
+$ oc apply -f quay-service-monitor.yaml
+----
++
+.Example output
++
+[source,terminal]
+----
servicemonitor.monitoring.coreos.com/example-registry-quay-metrics-monitor created
----
-== View the metrics in OpenShift
+[id="view-metrics-in-ocp"]
+== Viewing metrics in {ocp}
-You can access the metrics in the OpenShift console under Monitoring -> Metrics. In the Expression field, enter the text `quay_` to see the list of metrics available:
+You can access the metrics in the {ocp} console under *Monitoring* -> *Metrics*.
-== View the metrics in OpenShift
+[id="view-metrics-in-ocp"]
+== Viewing metrics in {ocp}
-You can access the metrics in the OpenShift console under Monitoring -> Metrics. In the Expression field, enter the text `quay_` to see the list of metrics available:
+You can access the metrics in the {ocp} console under *Monitoring* -> *Metrics*. In the Expression field, enter *quay_* to see the list of metrics available:
image:metrics-single-namespace.png[Quay metrics]
-
-For example, if you have added users to your registry, select the `quay-users_rows` metric:
+For example, if you have added users to your registry, select the *quay-users_rows* metric:
image:metrics-single-namespace-users.png[Quay metrics]
diff --git a/modules/oci-intro.adoc b/modules/oci-intro.adoc
index 9880a7200..31140f71e 100644
--- a/modules/oci-intro.adoc
+++ b/modules/oci-intro.adoc
@@ -1,10 +1,10 @@
[[oci-intro]]
= OCI Support and {productname}
-Container registries such as {productname} were originally designed to support container images in the Docker image format. To promote the use of additional runtimes apart from Docker, the Open Container Initiative (OCI) was created to provide a standardization surrounding container runtimes and image formats. Most container registries support the OCI standardization as it is based on the link:https://docs.docker.com/registry/spec/manifest-v2-2/[Docker image manifest V2, Schema 2] format.
+Container registries such as {productname} were originally designed to support container images in the Docker image format. To promote the use of additional runtimes apart from Docker, the Open Container Initiative (OCI) was created to provide standardization surrounding container runtimes and image formats. Most container registries support the OCI standardization as it is based on the link:https://docs.docker.com/registry/spec/manifest-v2-2/[Docker image manifest V2, Schema 2] format.
-In addition to container images, a variety of artifacts have emerged that support not just individual applications, but the Kubernetes platform as a whole. These range from Open Policy Agent (OPA) policies for security and governance to Helm charts and Operators to aid in application deployment.
+In addition to container images, a variety of artifacts have emerged that support not just individual applications, but the Kubernetes platform as a whole. These range from Open Policy Agent (OPA) policies for security and governance to Helm charts and Operators to aid in application deployment.
-{productname} is a private container registry that not only stores container images, but supports an entire ecosystem of tooling to aid in the management of containers. Support for OCI based artifacts in {producty} has extended from solely Helm to include cosign and ztsd compression schemes by default. As such, `FEATURE_HELM_OCI_SUPPORT` has been deprecated.
+{productname} is a private container registry that not only stores container images, but supports an entire ecosystem of tooling to aid in the management of containers. Support for OCI based artifacts in {productname} 3.6 has extended from solely Helm to include cosign and zstd compression schemes by default. As such, `FEATURE_HELM_OCI_SUPPORT` has been deprecated.
-When {productname} {producty} is deployed using the OpenShift Operator, support for Helm and OCI artifacts is now enabled by default under the `FEATURE_GENERAL_OCI_SUPPORT` configuration. If you need to explicitly enable the feature, for example, if it has previously been disabled or if you have upgraded from a version where it is not enabled by default, see the section xref:config-fields-helm-oci[Explicitly enabling OCI and Helm support].
+When {productname} 3.6 is deployed using the OpenShift Operator, support for Helm and OCI artifacts is now enabled by default under the `FEATURE_GENERAL_OCI_SUPPORT` configuration. If you need to explicitly enable the feature, for example, if it has previously been disabled or if you have upgraded from a version where it is not enabled by default, see the section xref:config-fields-helm-oci[Explicitly enabling OCI and Helm support].
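+
+As an illustration of OCI artifact support, a Helm 3.8 or later client can push a chart to {productname} as an OCI artifact. The registry hostname, organization, and chart archive below are hypothetical placeholders:
+
+[source,terminal]
+----
+$ helm registry login quay-server.example.com
+$ helm push etherpad-0.0.1.tgz oci://quay-server.example.com/quayorg
+----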
diff --git a/modules/operator-cloudfront.adoc b/modules/operator-cloudfront.adoc
index 30473fba5..322843982 100644
--- a/modules/operator-cloudfront.adoc
+++ b/modules/operator-cloudfront.adoc
@@ -1,7 +1,14 @@
-[[operator-cloudfront]]
+:_content-type: PROCEDURE
+[id="operator-cloudfront"]
= AWS S3 CloudFront
-If you use AWS S3 CloudFront for backend registry storage, specify the private key as shown in the following example:
-....
+Use the following procedure if you are using AWS S3 CloudFront for your backend registry storage.
+
+.Procedure
+
+. Enter the following command to specify the private key:
++
+[source,terminal]
+----
$ oc create secret generic --from-file config.yaml=./config_awss3cloudfront.yaml --from-file default-cloudfront-signing-key.pem=./default-cloudfront-signing-key.pem test-config-bundle
-....
\ No newline at end of file
+----
\ No newline at end of file
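+
+For illustration, the `config_awss3cloudfront.yaml` file referenced above might contain a storage section similar to the following sketch. The `CloudFrontedS3Storage` driver and its parameter names are assumptions modeled on the unmanaged storage examples later in this guide, and all values are placeholders; verify them against the {productname} configuration guide:
+
+[source,yaml]
+----
+DISTRIBUTED_STORAGE_CONFIG:
+  default:
+    - CloudFrontedS3Storage
+    - cloudfront_distribution_domain: <distribution>.cloudfront.net
+      cloudfront_key_id: <cloudfront-key-id>
+      cloudfront_privatekey_filename: default-cloudfront-signing-key.pem
+      host: s3.us-east-2.amazonaws.com
+      s3_access_key: <access-key>
+      s3_secret_key: <secret-key>
+      s3_bucket: <bucket-name>
+      storage_path: /datastorage/registry
+DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: []
+DISTRIBUTED_STORAGE_PREFERENCE:
+    - default
+----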
diff --git a/modules/operator-components-intro.adoc b/modules/operator-components-intro.adoc
index 2ca358cec..0c7482cee 100644
--- a/modules/operator-components-intro.adoc
+++ b/modules/operator-components-intro.adoc
@@ -10,23 +10,27 @@ In the `QuayRegistry` custom resource, the `spec.components` field configures co
----
spec:
  components:
-    - managed: true
-      kind: clair
-    - managed: true
-      kind: postgres
-    - managed: true
-      kind: objectstorage
-    - managed: true
-      kind: redis
-    - managed: true
-      kind: horizontalpodautoscaler
-    - managed: true
-      kind: route
-    - managed: true
-      kind: mirror
-    - managed: true
-      kind: monitoring
-    - managed: true
-      kind: tls
+    - kind: quay
+      managed: true
+    - kind: postgres
+      managed: true
+    - kind: clair
+      managed: true
+    - kind: redis
+      managed: true
+    - kind: horizontalpodautoscaler
+      managed: true
+    - kind: objectstorage
+      managed: true
+    - kind: route
+      managed: true
+    - kind: mirror
+      managed: true
+    - kind: monitoring
+      managed: true
+    - kind: tls
+      managed: true
+    - kind: clairpostgres
+      managed: true
----
diff --git a/modules/operator-components-managed.adoc b/modules/operator-components-managed.adoc
index 23159040c..b9823c18e 100644
--- a/modules/operator-components-managed.adoc
+++ b/modules/operator-components-managed.adoc
@@ -1,26 +1,24 @@
-[[operator-components-managed]]
+[id="operator-components-managed"]
= Using managed components
+Unless your `QuayRegistry` custom resource specifies otherwise, the {productname} Operator uses defaults for the following managed components:
-Unless your `QuayRegistry` custom resource specifies otherwise, the Operator will use defaults for the following managed components:
-
-* **postgres:** For storing the registry metadata,
+* **quay:** Holds overrides for the {productname} deployment, for example, environment variables and the number of replicas. This component is new in {productname} 3.7 and cannot be set to unmanaged.
+* **postgres:** For storing the registry metadata,
ifeval::["{productname}" == "Red Hat Quay"]
uses a version of Postgres 10 from the link:https://www.softwarecollections.org/en/[Software Collections]
endif::[]
ifeval::["{productname}" == "Project Quay"]
uses an upstream (CentOS) version of Postgres 10
endif::[]
-* **redis:** Handles Quay builder coordination and some internal logging
-* **objectstorage:** For storing image layer blobs, utilizes the `ObjectBucketClaim` Kubernetes API which is provided by Noobaa/RHOCS
* **clair:** Provides image vulnerability scanning
-* **horizontalpodautoscaler:** Adjusts the number of Quay pods depending on memory/cpu consumption
-* **mirror:** Configures repository mirror workers (to support optional repository mirroring)
-* **route:** Provides an external entrypoint to the Quay registry from outside OpenShift
+* **redis:** Stores live builder logs and the {productname} tutorial. Also includes the locking mechanism that is required for garbage collection.
+* **horizontalpodautoscaler:** Adjusts the number of `Quay` pods depending on memory/cpu consumption
+* **objectstorage:** For storing image layer blobs, utilizes the `ObjectBucketClaim` Kubernetes API which is provided by Noobaa/RHOCS
+* **route:** Provides an external entrypoint to the {productname} registry from outside of {ocp}
+* **mirror:** Configures repository mirror workers to support optional repository mirroring
* **monitoring:** Features include a Grafana dashboard, access to individual metrics, and alerting to notify for frequently restarting Quay pods
-* **tls:** Configures whether {productname} or OpenShift handles TLS
-
-The Operator will handle any required configuration and installation work needed for {productname} to use the managed components. If the opinionated deployment performed by the Quay Operator is unsuitable for your environment, you can provide the Operator with `unmanaged` resources (overrides) as described in the following sections.
-
-
+* **tls:** Configures whether {productname} or {ocp} handles SSL/TLS
+* **clairpostgres:** Configures a managed Clair database
+The {productname} Operator handles any required configuration and installation work needed for {productname} to use the managed components. If the opinionated deployment performed by the {productname} Operator is unsuitable for your environment, you can provide the {productname} Operator with `unmanaged` resources (overrides) as described in the following sections.
\ No newline at end of file
diff --git a/modules/operator-components-unmanaged-other.adoc b/modules/operator-components-unmanaged-other.adoc
index 2ff86b41d..2cf1fce24 100644
--- a/modules/operator-components-unmanaged-other.adoc
+++ b/modules/operator-components-unmanaged-other.adoc
@@ -1,6 +1,5 @@
-[[operator-components-unmanaged-other]]
-= Configuring other components
-
-
-// TODO 36 Clair unmanaged
+:_content-type: CONCEPT
+[id="operator-components-unmanaged-other"]
+= Configuring external Redis
+Use the content in this section to configure an external Redis deployment.
\ No newline at end of file
diff --git a/modules/operator-config-cli-access.adoc b/modules/operator-config-cli-access.adoc
index 0a6b2cdb4..11b77be7a 100644
--- a/modules/operator-config-cli-access.adoc
+++ b/modules/operator-config-cli-access.adoc
@@ -16,15 +16,19 @@ metadata:
...
spec:
  components:
+    - kind: quay
+      managed: true
...
-  configBundleSecret: example-registry-quay-config-bundle-fjpnm
+    - kind: clairpostgres
+      managed: true
+  configBundleSecret: init-config-bundle-secret
status:
-  configEditorCredentialsSecret: example-registry-quay-config-editor-credentials-kk55dc7299
-  configEditorEndpoint: https://example-registry-quay-config-editor-quay-enterprise.apps.docs.quayteam.org
-  currentVersion: 3.6.0
-  lastUpdated: 2021-09-21 11:18:13.285192787 +0000 UTC
-  registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org
-  unhealthyComponents: {}
+  configEditorCredentialsSecret: example-registry-quay-config-editor-credentials-fg2gdgtm24
+  configEditorEndpoint: https://example-registry-quay-config-editor-quay-enterprise.apps.docs.gcp.quaydev.org
+  currentVersion: 3.7.0
+  lastUpdated: 2022-05-11 13:28:38.199476938 +0000 UTC
+  registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org
+
----
The relevant fields are:
@@ -41,7 +45,7 @@ To determine the username and password for the config editor tool:
+
[source,yaml]
----
-$ oc get secret -n quay-enterprise example-registry-quay-config-editor-credentials-kk55dc7299 -o yaml
+$ oc get secret -n quay-enterprise example-registry-quay-config-editor-credentials-fg2gdgtm24 -o yaml

apiVersion: v1
data:
diff --git a/modules/operator-config-cli-download.adoc b/modules/operator-config-cli-download.adoc
index 6b95ecf61..8f89608ac 100644
--- a/modules/operator-config-cli-download.adoc
+++ b/modules/operator-config-cli-download.adoc
@@ -3,7 +3,7 @@
There are a number of methods for accessing the current configuration:
-. Using the config editor endpoint, specifying the username and password for the config editor:
+. Using the config editor endpoint, specifying the username and password for the config editor:
+
[source,bash]
----
@@ -29,36 +29,33 @@ $ curl -k -u quayconfig:JFpBEJMCtkPTfjxt https://example-registry-quay-config-ed
+
[source,bash]
----
-$ oc get secret -n quay-enterprise example-registry-quay-config-bundle-jkfhs -o jsonpath='{.data}'
+$ oc get secret -n quay-enterprise init-config-bundle-secret -o jsonpath='{.data}'
----
+
+.Sample output
[source,yaml]
----
{
-    "config.yaml": "QUxMT1dfUFVMTFNfV0lUSE9VVF9TVFJJQ1RfTE9HR0lORzogZmFsc2UKQVVUSEVOVElDQVRJT05fVFlQRTogRGF0YWJhc2UKQVZBVEFSX0tJTkQ6IGxvY2FsCkRBVEFCQVNFX1NFQ1JFVF9LRVk6IHhlOEc1VDBNbkllaGxNQzNkTjd3MWR5WWxwVmo0a0R2enlxZ3l6Ulp5ZjFpODBmWWU3VDUxU1FPZ3hXelpocFlqYlVxNzRKaDllVVVEVWpyCkRFR
-...
-OgotIDJ3ClRFQU1fUkVTWU5DX1NUQUxFX1RJTUU6IDYwbQpURVNUSU5HOiBmYWxzZQpVU0VSX1JFQ09WRVJZX1RPS0VOX0xJRkVUSU1FOiAzMG0K",
-    "extra_ca_cert_service-ca.crt": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVVENDQWptZ0F3SUJBZ0lJRE9kWFhuUXFjMUF3RFFZSktvWklodmNOQVFFTEJRQXdOakUwTURJR0ExVUUKQXd3cmIzQmxibk5vYVdaMExYTmxjblpwWTJVdGMyVnlkbWx1WnkxemFXZHVaWEpBTVRZek1UYzNPREV3TXpBZQpGdzB5TVRBNU1UWXdOelF4TkRKYUZ3MHl
-...
-XSW1jaApkQXZTWGpFUnZOZEZzN3pHK1VzTmZwN0ZIQkJVWkY4L2RZNWJCR2MwWTVaY0J6bFNjQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
+    "config.yaml": "RkVBVFVSRV9VU0 ... MDAwMAo="
}
----
.. Decode the data:
+
[source,bash]
----
-$ echo 'QUxMT1dfUFVMTFN...U1FOiAzMG0K' | base64 --decode
+$ echo 'RkVBVFVSRV9VU0 ... MDAwMAo=' | base64 --decode
----
+
[source,yaml]
----
-ALLOW_PULLS_WITHOUT_STRICT_LOGGING: false
-AUTHENTICATION_TYPE: Database
-...
-TAG_EXPIRATION_OPTIONS:
-- 2w
-TEAM_RESYNC_STALE_TIME: 60m
-TESTING: false
-USER_RECOVERY_TOKEN_LIFETIME: 30m
+FEATURE_USER_INITIALIZE: true
+BROWSER_API_CALLS_XHR_ONLY: false
+SUPER_USERS:
+- quayadmin
+FEATURE_USER_CREATION: false
+FEATURE_QUOTA_MANAGEMENT: true
+FEATURE_PROXY_CACHE: true
+FEATURE_BUILD_SUPPORT: true
+DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 102400000
----
diff --git a/modules/operator-config-cli.adoc b/modules/operator-config-cli.adoc
index 339ec8514..14a5c43c9 100644
--- a/modules/operator-config-cli.adoc
+++ b/modules/operator-config-cli.adoc
@@ -1,20 +1,23 @@
[[operator-config-cli]]
-= Configuring Quay on OpenShift using the command line and API
+= Configuring Quay on OpenShift
Once deployed, you can configure the Quay application by editing the Quay configuration bundle secret `spec.configBundleSecret` and you can also change the managed status of components in the `spec.components` object of the QuayRegistry resource
-The Operator does not watch the `spec.configBundleSecret` resource for changes, so it is recommended that configuration changes be made to a new `Secret` resource and that the `spec.configBundleSecret` field is updated to reflect the change. In the event there are issues with the new configuration, it is simple to revert the value of `spec.configBundleSecret` to the older `Secret`.
+Alternatively, you can use the config editor UI to configure the Quay application, as described in the section xref:operator-config-ui[].
-The procedure for changing the configuration involves:
+== Editing the config bundle secret in the OpenShift console
-. Determining the current endpoints and secrets
-. Downloading the existing configuration bundle, if {productname} has already been deployed on OpenShift
-. Creating or updating the `config.yaml` configuration file
-. Assembling any SSL certs required for Quay, or custom SSL certs needed for services
-. Creating a new config bundle secret, using the config file and any certs
-. Creating or updating the registry, referencing the new config bundle secret and specifying any over-rides for managing components
-. Monitoring the deployment to ensure successful completion and that the configuration changes have taken effect
+.Procedure
+. On the Quay Registry overview screen, click the link for the Config Bundle Secret:
++
+image:operator-quay-registry-overview.png[Quay Registry overview]
-Alternatively, you can use the config editor UI to configure the Quay application, as described in the section xref:operator-config-ui[].
+. To edit the secret, click **Actions** -> **Edit Secret**.
++
+image:operator-config-bundle-edit-secret.png[Edit secret]
+. Modify the configuration and save the changes.
++
+image:operator-save-config-changes.png[Save changes]
+. Monitor the deployment to ensure successful completion and that the configuration changes have taken effect.
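+
+If you prefer the command line to the console flow above, you can also create a new config bundle secret and point `spec.configBundleSecret` at it. This is a hedged sketch; the secret name `new-config-bundle-secret` and the registry name `example-registry` are placeholders:
+
+[source,terminal]
+----
+$ oc create secret generic new-config-bundle-secret -n quay-enterprise --from-file config.yaml=./config.yaml
+$ oc patch quayregistry example-registry -n quay-enterprise --type=merge -p '{"spec":{"configBundleSecret":"new-config-bundle-secret"}}'
+----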
diff --git a/modules/operator-config-ui-change.adoc b/modules/operator-config-ui-change.adoc
index 37b594528..949b11ad3 100644
--- a/modules/operator-config-ui-change.adoc
+++ b/modules/operator-config-ui-change.adoc
@@ -1,24 +1,31 @@
-[[operator-config-ui-change]]
+:_content-type: PROCEDURE
+[id="operator-config-ui-change"]
== Changing configuration
-In this example of updating the configuration, a superuser is added via the config editor tool:
+In the following example, you will update your configuration file by changing the default expiration period of deleted tags.
-. Add an expiration period, for example `4w`, for the time machine functionality:
+.Procedure
+
+. On the config editor, locate the *Time Machine* section.
+
+. Add an expiration period to the *Allowed expiration periods* box, for example, `4w`:
+
image:ui-time-machine-add.png[Add expiration period]
-. Select `Validate Configuration Changes` to ensure that the changes are valid
-. Apply the changes by pressing the `Reconfigure Quay` button:
+
+. Select *Validate Configuration Changes* to ensure that the changes are valid.
+
+. Apply the changes by pressing *Reconfigure Quay*:
+
image:config-editor-reconfigure.png[Reconfigure]
-. The config tool notifies you that the change has been submitted to Quay:
+After applying the changes, the config tool notifies you that the changes have been submitted to your {productname} deployment:
+
image:config-editor-reconfigured.png[Reconfigured]
[NOTE]
====
-Reconfiguring {productname} using the config tool UI can lead to the registry being unavailable for a short time, while the updated configuration is applied.
+Reconfiguring {productname} using the config tool UI can lead to the registry being unavailable for a short time while the updated configuration is applied.
====
diff --git a/modules/operator-config-ui-updated.adoc b/modules/operator-config-ui-updated.adoc
index aaefb6b9e..5d262e7cf 100644
--- a/modules/operator-config-ui-updated.adoc
+++ b/modules/operator-config-ui-updated.adoc
@@ -3,21 +3,17 @@
== Accessing the updated config tool credentials in the UI
-Since a new pod has been created for the config tool, a new secret will have been created, and you will need to use the updated password when you next attempt to login:
-
-image:config-editor-secret-updated.png[Config editor secret updated]
-
-
+With {productname} 3.7, reconfiguring Quay through the UI no longer generates a new login password. The password is now generated only once and remains the same after reconciling `QuayRegistry` objects.
== Accessing the updated config.yaml in the UI
-Use the config bundle to access the updated `config.yaml` file.
+Use the config bundle to access the updated `config.yaml` file.
. On the QuayRegistry details screen, click on the Config Bundle Secret
. In the Data section of the Secret details screen, click Reveal values to see the `config.yaml` file
-. Check that the change has been applied. In this case, `4w` should be in the list of `TAG_EXPIRATION_OPTIONS`:
+. Check that the change has been applied. In this case, `4w` should be in the list of `TAG_EXPIRATION_OPTIONS`:
+
[source,yaml]
----
diff --git a/modules/operator-console-monitoring-alerting.adoc b/modules/operator-console-monitoring-alerting.adoc
index 36f6e3263..2c0401022 100644
--- a/modules/operator-console-monitoring-alerting.adoc
+++ b/modules/operator-console-monitoring-alerting.adoc
@@ -1,7 +1,7 @@
[[operator-console-monitoring-alerting]]
= Console monitoring and alerting
-{productname} {producty} provides support for monitoring Quay instances that were deployed using the Operator, from inside the OpenShift console. The new monitoring features include a Grafana dashboard, access to individual metrics, and alerting to notify for frequently restarting Quay pods.
+{productname} provides support for monitoring Quay instances that were deployed using the Operator, from inside the OpenShift console. The new monitoring features include a Grafana dashboard, access to individual metrics, and alerting to notify for frequently restarting Quay pods.
[NOTE]
====
@@ -26,7 +26,7 @@ image:console-dashboard-1.png[Console dashboard]
== Metrics
-You can see the underlying metrics behind the Quay dashboard, by accessing Monitoring -> Metrics in the UI. In the Expression field, enter the text `quay_` to see the list of metrics available:
+You can see the underlying metrics behind the Quay dashboard by accessing Monitoring -> Metrics in the UI. In the Expression field, enter the text `quay_` to see the list of metrics available:
image:quay-metrics.png[Quay metrics]
@@ -38,11 +38,10 @@ This metric shows the number of organizations in the registry, and it is directl
== Alerting
-An alert is raised if the Quay pods restart too often. The alert can be configured by accessing the Alerting rules tab from Monitoring -> Alerting in the consol UI and searching for the Quay-specific alert:
+An alert is raised if the Quay pods restart too often. The alert can be configured by accessing the Alerting rules tab from Monitoring -> Alerting in the console UI and searching for the Quay-specific alert:
image:alerting-rules.png[Alerting rules]
-Select the QuayPodFrequentlyRestarting rule detail to configure the alert:
+Select the QuayPodFrequentlyRestarting rule detail to configure the alert:
image:quay-pod-frequently-restarting.png[Alerting rule details]
-
diff --git a/modules/operator-custom-ssl-certs-config-bundle.adoc b/modules/operator-custom-ssl-certs-config-bundle.adoc
index 7dd9ba92a..4675f7161 100644
--- a/modules/operator-custom-ssl-certs-config-bundle.adoc
+++ b/modules/operator-custom-ssl-certs-config-bundle.adoc
@@ -1,9 +1,35 @@
[[operator-custom-ssl-certs-config-bundle]]
= Using the config bundle to configure custom SSL certs
+You can configure custom SSL certs either before initial deployment or after {productname} is deployed on OpenShift, by creating or updating the config bundle secret. If you are adding the cert(s) to an existing deployment, you must include the existing `config.yaml` in the new config bundle secret, even if you are not making any configuration changes.
-You can configure custom SSL certs either before initial deployment or after {productname} is deployed on OpenShift, by creating a new config bundle secret. If you are adding the cert(s) to an existing deployment, you must include the complete existing `config.yaml` in the new config bundle secret, even if you are not making any configuration changes.
+== Set TLS to unmanaged
+In your `QuayRegistry` YAML, set `kind: tls` to `managed: false`:
+
+[source,yaml]
+----
+    - kind: tls
+      managed: false
+----
+
+In the events, you should see that the change is blocked until you set up the appropriate config:
+
+[source,yaml]
+----
+    - lastTransitionTime: '2022-03-28T12:56:49Z'
+      lastUpdateTime: '2022-03-28T12:56:49Z'
+      message: >-
+        required component `tls` marked as unmanaged, but `configBundleSecret`
+        is missing necessary fields
+      reason: ConfigInvalid
+      status: 'True'
+
+----
+
+== Add certs to config bundle
+
+.Procedure
. Create the secret using embedded data or using files:
.. Embed the configuration details directly in the Secret resource YAML file, for example:
+
@@ -17,9 +43,15 @@ metadata:
  namespace: quay-enterprise
data:
  config.yaml: |
-    ALLOW_PULLS_WITHOUT_STRICT_LOGGING: false
-    AUTHENTICATION_TYPE: Database
-    ...
+    FEATURE_USER_INITIALIZE: true
+    BROWSER_API_CALLS_XHR_ONLY: false
+    SUPER_USERS:
+    - quayadmin
+    FEATURE_USER_CREATION: false
+    FEATURE_QUOTA_MANAGEMENT: true
+    FEATURE_PROXY_CACHE: true
+    FEATURE_BUILD_SUPPORT: true
+    DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 102400000
  extra_ca_cert_my-custom-ssl.crt: |
    -----BEGIN CERTIFICATE-----
    MIIDsDCCApigAwIBAgIUCqlzkHjF5i5TXLFy+sepFrZr/UswDQYJKoZIhvcNAQEL
@@ -42,7 +74,7 @@ $ oc create secret generic custom-ssl-config-bundle-secret \
----
-. Create or update the QuayRegistry YAML file `quayregistry.yaml`, referencing the created Secret, for example:
+. Create or update the QuayRegistry YAML file `quayregistry.yaml`, referencing the created Secret, for example:
+
.quayregistry.yaml
[source,yaml]
diff --git a/modules/operator-customize-images.adoc b/modules/operator-customize-images.adoc
index 10157b729..31f0289b9 100644
--- a/modules/operator-customize-images.adoc
+++ b/modules/operator-customize-images.adoc
@@ -1,15 +1,18 @@
-[[operator-customize-images]]
+:_content-type: PROCEDURE
+[id="operator-customize-images"]
= Customizing Default Operator Images
-[NOTE]
+In certain circumstances, it might be useful to override the default images used by the {productname} Operator. This can be done by setting one or more environment variables in the {productname} Operator `ClusterServiceVersion`.
+
+[IMPORTANT]
====
-Using this mechanism is not supported for production Quay environments and is strongly encouraged only for development/testing purposes. There is no guarantee your deployment will work correctly when using non-default images with the Quay Operator.
+Using this mechanism is not supported for production {productname} environments and is strongly encouraged only for development/testing purposes. There is no guarantee your deployment will work correctly when using non-default images with the {productname} Operator.
====
-In certain circumstances, it may be useful to override the default images used by the Operator. This can be done by setting one or more environment variables in the Quay Operator `ClusterServiceVersion`.
-
+[id="custom-environment-variables"]
== Environment Variables
-The following environment variables are used in the Operator to override component images:
+
+The following environment variables are used in the {productname} Operator to override component images:
[cols=2*]
|===
@@ -31,21 +34,29 @@ The following environment variables are used in the Operator to override compone
[NOTE]
====
-Override images *must* be referenced by manifest (@sha256:), not by tag (:latest).
+Overridden images *must* be referenced by manifest (@sha256:) and not by tag (:latest).
====
-== Applying Overrides to a Running Operator
+[id="applying-overrides-to-running-operator"]
+== Applying overrides to a running Operator
-When the Quay Operator is installed in a cluster via the link:https://docs.openshift.com/container-platform/4.6/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)], the managed component container images can be easily overridden by modifying the `ClusterServiceVersion` object, which is OLM's representation of a running Operator in the cluster. Find the Quay Operator's `ClusterServiceVersion` either by using a Kubernetes UI or `kubectl`/`oc`:
+When the {productname} Operator is installed in a cluster through the link:https://docs.openshift.com/container-platform/4.6/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)], the managed component container images can be easily overridden by modifying the `ClusterServiceVersion` object.
-```
-$ oc get clusterserviceversions -n
-```
+Use the following procedure to apply overrides to a running Operator.
-Using the UI, `oc edit`, or any other method, modify the Quay `ClusterServiceVersion` to include the environment variables outlined above to point to the override images:
+.Procedure
-*JSONPath*: `spec.install.spec.deployments[0].spec.template.spec.containers[0].env`
+. The `ClusterServiceVersion` object is OLM's representation of a running Operator in the cluster. Find the {productname} Operator's `ClusterServiceVersion` by using a Kubernetes UI or the `kubectl`/`oc` CLI tool. For example:
++
+[source,terminal]
+----
+$ oc get clusterserviceversions -n <namespace>
+----
+. Using the UI, `oc edit`, or another method, modify the {productname} `ClusterServiceVersion` to include the environment variables outlined above to point to the override images:
++
+*JSONPath*: `spec.install.spec.deployments[0].spec.template.spec.containers[0].env`
++
[source,yaml]
----
- name: RELATED_IMAGE_COMPONENT_QUAY
diff --git a/modules/operator-deploy-cli.adoc b/modules/operator-deploy-cli.adoc
index 4d1604e22..2d04edf6b 100644
--- a/modules/operator-deploy-cli.adoc
+++ b/modules/operator-deploy-cli.adoc
@@ -48,17 +48,70 @@ metadata:
  name: example-registry
  namespace: quay-enterprise
spec:
-  configBundleSecret: init-config-bundle-secret
+  configBundleSecret: init-config-bundle-secret
----
+
+.. If you have a proxy configured, you can add the information using overrides for Quay, Clair, and mirroring:
++
+.quayregistry.yaml:
+[source,yaml]
+----
+  kind: QuayRegistry
+  metadata:
+    name: quay37
+  spec:
+    configBundleSecret: config-bundle-secret
+    components:
+      - kind: objectstorage
+        managed: false
+      - kind: route
+        managed: true
+      - kind: mirror
+        managed: true
+        overrides:
+          env:
+            - name: DEBUGLOG
+              value: "true"
+            - name: HTTP_PROXY
+              value: quayproxy.qe.devcluster.openshift.com:3128
+            - name: HTTPS_PROXY
+              value: quayproxy.qe.devcluster.openshift.com:3128
+            - name: NO_PROXY
+              value: svc.cluster.local,localhost,quay370.apps.quayperf370.perfscale.devcluster.openshift.com
+      - kind: tls
+        managed: false
+      - kind: clair
+        managed: true
+        overrides:
+          env:
+            - name: HTTP_PROXY
+              value: quayproxy.qe.devcluster.openshift.com:3128
+            - name: HTTPS_PROXY
+              value: quayproxy.qe.devcluster.openshift.com:3128
+            - name: NO_PROXY
+              value: svc.cluster.local,localhost,quay370.apps.quayperf370.perfscale.devcluster.openshift.com
+      - kind: quay
+        managed: true
+        overrides:
+          env:
+            - name: DEBUGLOG
+              value: "true"
+            - name: NO_PROXY
+              value: svc.cluster.local,localhost,quay370.apps.quayperf370.perfscale.devcluster.openshift.com
+            - name: HTTP_PROXY
+              value: quayproxy.qe.devcluster.openshift.com:3128
+            - name: HTTPS_PROXY
+              value: quayproxy.qe.devcluster.openshift.com:3128
+----
+
. Create the `QuayRegistry` in specified namespace:
+
```sh
-$ oc create -f quayregistry.yaml
+$ oc create -n quay-enterprise -f quayregistry.yaml
```
. See the section xref:operator-monitor-deploy-cli[Monitoring and debugging the deployment process] for information on how to track the progress of the deployment.
-
. Wait until the `status.registryEndpoint` is populated.
+
```sh
$ oc get quayregistry -n quay-enterprise example-registry -o jsonpath="{.status.registryEndpoint}" -w
```
-
diff --git a/modules/operator-helm-oci.adoc b/modules/operator-helm-oci.adoc
index d8660a7ea..d775452f1 100644
--- a/modules/operator-helm-oci.adoc
+++ b/modules/operator-helm-oci.adoc
@@ -21,7 +21,7 @@ type: Opaque
[IMPORTANT]
====
-As of {productname} {producty}, `FEATURE_HELM_OCI_SUPPORT` has been deprecated and will be removed in a future version of {productname}. In {productname} {producty}, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support.
+As of {productname} {producty}, `FEATURE_HELM_OCI_SUPPORT` has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support.
====
Create the secret in the appropriate namespace, in this example `quay-enterprise`:
diff --git a/modules/operator-install.adoc b/modules/operator-install.adoc
index f9ba04fed..650ebde73 100644
--- a/modules/operator-install.adoc
+++ b/modules/operator-install.adoc
@@ -4,7 +4,7 @@
-. Using the OpenShift console, Select Operators -> OperatorHub, then select the Red Hat Quay Operator. If there is more than one, be sure to use the Red Hat certified Operator and not the community version.
+. Using the OpenShift console, select Operators -> OperatorHub, then select the {productname} Operator. If there is more than one, be sure to use the Red Hat certified Operator and not the community version.
+
image:operatorhub-quay.png[]
. The Installation page outlines the features and prerequisites:
@@ -15,7 +15,7 @@ image:operator-install-page.png[]
image:operator-subscription.png[]
. The following choices are available for customizing the installation:
-* **Update Channel:** Choose the update channel, for example, `stable-3.6` for the latest release.
+* **Update Channel:** Choose the update channel, for example, `stable-3.7` for the latest release.
* **Installation Mode:** Choose `All namespaces on the cluster` if you want the Operator to be available cluster-wide. Choose `A specific namespace on the cluster` if you want it deployed only within a single namespace. It is recommended that you install the Operator cluster-wide. If you choose a single namespace, the monitoring component will not be available by default.
diff --git a/modules/operator-ipv6-dual-stack.adoc b/modules/operator-ipv6-dual-stack.adoc
new file mode 100644
index 000000000..7ed74b127
--- /dev/null
+++ b/modules/operator-ipv6-dual-stack.adoc
@@ -0,0 +1,64 @@
+:_content-type: CONCEPT
+[id="operator-ipv6-dual-stack"]
+= Deploying IPv6 on the {productname} Operator
+
+Your {productname} Operator deployment can now be served in locations that only support IPv6, such as Telco and Edge environments.
+
+For a list of known limitations, see xref:operator-ipv6-limitations-38[IPv6 limitations].
+
+[id="proc-manage-enabling-ipv6"]
+== Enabling the IPv6 protocol family
+
+Use the following procedure to enable IPv6 support on your {productname} deployment.
+
+.Prerequisites
+
+* You have updated {productname} to 3.8.
+* Your host and container software platform (Docker, Podman) must be configured to support IPv6.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `IPv6`, for example:
++
+[source,yaml]
+----
+---
+FEATURE_GOOGLE_LOGIN: false
+FEATURE_INVITE_ONLY_USER_CREATION: false
+FEATURE_LISTEN_IP_VERSION: IPv6
+FEATURE_MAILING: false
+FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false
+---
+----
+
+. Start, or restart, your {productname} deployment.
+
+. Check that your deployment is listening on IPv6 by entering the following command:
++
+[source,terminal]
+----
+$ curl <quay_endpoint>/health/instance
+{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200}
+----
+
+After enabling IPv6 in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured to use IPv6 and is not hindered by the xref:operator-ipv6-limitations-38[IPv6 and dual-stack limitations].
+
+[WARNING]
+====
+If your environment is configured for IPv4, but the `FEATURE_LISTEN_IP_VERSION` configuration field is set to `IPv6`, {productname} will fail to deploy.
+====
+
+[id="operator-ipv6-limitations-38"]
+== IPv6 limitations
+
+* Currently, attempting to configure your {productname} deployment with the common Azure Blob Storage configuration will not work on IPv6 single stack environments. Because the endpoint of Azure Blob Storage does not support IPv6, there is no workaround in place for this issue.
++
+For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4433[PROJQUAY-4433].
+
+* Currently, attempting to configure your {productname} deployment with Amazon S3 CloudFront will not work on IPv6 single stack environments. Because the endpoint of Amazon S3 CloudFront does not support IPv6, there is no workaround in place for this issue.
++
+For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470].
+
+* Currently, OpenShift Data Foundation (ODF) is unsupported when {productname} is deployed on IPv6 single stack environments. As a result, ODF cannot be used in IPv6 environments. This limitation is scheduled to be fixed in a future version of OpenShift Data Foundation.
+
+* Currently, dual-stack (IPv4 and IPv6) support does not work on {productname} {ocp} deployments. When {productname} 3.8 is deployed on {ocp} with dual-stack support enabled, the Quay Route generated by the {productname} Operator only generates an IPv4 address, and not an IPv6 address. As a result, clients with an IPv6 address cannot access the {productname} application on {ocp}. This limitation is scheduled to be fixed in a future version of {ocp}.
\ No newline at end of file
diff --git a/modules/operator-monitor-deploy-cli.adoc b/modules/operator-monitor-deploy-cli.adoc
index 31ed65cff..71b68a77c 100644
--- a/modules/operator-monitor-deploy-cli.adoc
+++ b/modules/operator-monitor-deploy-cli.adoc
@@ -1,7 +1,7 @@
[[operator-monitor-deploy-cli]]
= Monitoring and debugging the deployment process
-{productname} 3.6 provides new functionality to troubleshoot problems during the deployment phase. The status in the QuayRegistry object can help you monitor the health of the components during the deployment an help you debug any problems that may arise:
+Users can now troubleshoot problems during the deployment phase. The status in the `QuayRegistry` object can help you monitor the health of the components during the deployment and help you debug any problems that might arise:
```
@@ -136,7 +136,7 @@ When the deployment process finishes successfully, the status in the QuayRegistr
    type: RolloutBlocked
  configEditorCredentialsSecret: example-registry-quay-config-editor-credentials-hg7gg7h57m
  configEditorEndpoint: https://example-registry-quay-config-editor-quay-enterprise.apps.docs.quayteam.org
-  currentVersion: 3.6.0
+  currentVersion: {producty}
  lastUpdated: 2021-09-14 10:52:46.104181633 +0000 UTC
  registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org
  unhealthyComponents: {}
diff --git a/modules/operator-preconfig-tls-routes.adoc b/modules/operator-preconfig-tls-routes.adoc
index 2ecd501a7..116e83bc8 100644
--- a/modules/operator-preconfig-tls-routes.adoc
+++ b/modules/operator-preconfig-tls-routes.adoc
@@ -1,17 +1,23 @@
-[[operator-preconfig-tls-routes]]
-= Configuring TLS and routes
+:_content-type: REFERENCE
+[id="operator-preconfig-tls-routes"]
+= Configuring SSL/TLS and Routes
-Support for OpenShift Container Platform Edge-Termination Routes has been added by way of a new managed component, `tls`. This separates the `route` component from TLS and allows users to configure both separately. `EXTERNAL_TLS_TERMINATION: true` is the opinionated setting. Managed `tls` means that the default cluster wildcard cert is used. Unmanaged `tls` means that the user provided cert/key pair will be injected into the `Route`.
+Support for {ocp} Edge-Termination Routes has been added by way of a new managed component, `tls`. This separates the `route` component from SSL/TLS and allows users to configure both separately.
-`ssl.cert` and `ssl.key` are now moved to a separate, persistent Secret, which ensures that the cert/key pair is not re-generated upon every reconcile. These are now formatted as `edge` routes and mounted to the same directory in the Quay container.
+`EXTERNAL_TLS_TERMINATION: true` is the opinionated setting.
-Multiple permutations are possible when configuring TLS and Routes, but the following rules apply:
+* Managed `tls` means that the default cluster wildcard certificate is used.
+* Unmanaged `tls` means that the user-provided key and certificate pair will be injected into the `Route`.
-* If TLS is `managed`, then route must also be `managed`
-* If TLS is `unmanaged` then you must supply certs, either with the config tool or directly in the config bundle
-//* However, it is possible to have both TLS and route `unmanaged` and not supply certs.
+The `ssl.cert` and `ssl.key` are now moved to a separate, persistent secret, which ensures that the key and certificate pair are not re-generated upon every reconcile. The key and certificate pair are now formatted as `edge` routes and mounted to the same directory in the `Quay` container.
-The following table outlines the valid options:
+Multiple permutations are possible when configuring SSL/TLS and Routes, but the following rules apply:
+
+* If SSL/TLS is `managed`, then your route must also be `managed`.
+* If SSL/TLS is `unmanaged`, then you must supply certificates, either with the config tool or directly in the config bundle.
+//* However, it is possible to have both TLS and route `unmanaged` and not supply certs.
+
+The following table describes the valid options:
.Valid configuration options for TLS and routes
[width="100%",cols="2,2,2,2,3"options="header"]
|===
@@ -23,18 +29,21 @@ The following table outlines the valid options:
// | None (Not for production) | Unmanaged | Unmanaged | No | Sets a passthrough route, allows HTTP traffic directly from the route and into the Pod
|===
-
[NOTE]
====
-{productname} 3.6 does not support builders when TLS is managed by the Operator.
+{productname} 3.7 does not support builders when TLS is managed by the Operator.
====
+[id="creating-config-bundle-secret-tls-cert-key-pair"]
+== Creating the config bundle secret with the SSL/TLS cert and key pair
-== Creating the config bundle secret with TLS cert, key pair:
+Use the following procedure to create a config bundle secret that includes your own SSL/TLS certificate and key pair.
-To add your own TLS cert and key, include them in the config bundle secret as follows:
+.Procedure
-[source,bash]
+* Enter the following command to create a config bundle secret that includes your own SSL/TLS certificate and key pair:
++
+[source,terminal]
----
$ oc create secret generic --from-file config.yaml=./config.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret
-----
\ No newline at end of file
+----
diff --git a/modules/operator-preconfigure.adoc b/modules/operator-preconfigure.adoc
index 0464dd719..799fb5464 100644
--- a/modules/operator-preconfigure.adoc
+++ b/modules/operator-preconfigure.adoc
@@ -30,5 +30,5 @@ spec:
. Deploy the registry using the YAML file:
+
----
-oc create -f quayregistry.yaml
+$ oc create -n quay-enterprise -f quayregistry.yaml
----
diff --git a/modules/operator-resize-storage.adoc b/modules/operator-resize-storage.adoc
index b4269b282..1dad8918b 100644
--- a/modules/operator-resize-storage.adoc
+++ b/modules/operator-resize-storage.adoc
@@ -1,32 +1,57 @@
-[[operator-resize-storage]]
+:_content-type: PROCEDURE
+[id="operator-resize-storage"]
= Resizing Managed Storage
-The Quay Operator creates default object storage using the defaults provided by RHOCS when creating a `NooBaa` object (50 Gib). There are two ways to extend this storage; you can resize an existing PVC or add more PVCs to a new storage pool.
+The {productname} Operator creates default object storage using the defaults provided by Red Hat OpenShift Data Foundation (ODF) when creating a `NooBaa` object (50 GiB).
-== Resize Noobaa PVC
+There are two ways to extend `NooBaa` object storage:
-. Log into the OpenShift console and select `Storage` -> `Persistent Volume Claims`.
-. Select the `PersistentVolumeClaim` named like `noobaa-default-backing-store-noobaa-pvc-*`.
-. From the Action menu, select `Expand PVC`.
-. Enter the new size of the Persistent Volume Claim and select `Expand`.
-
-After a few minutes (depending on the size of the PVC), the expanded size should reflect in the PVC's `Capacity` field.
+. You can resize an existing persistent volume claim (PVC).
+. You can add more PVCs to a new storage pool.
[NOTE]
====
-Expanding CSI volumes is a Technology Preview feature only. For more information, see link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.6/html/storage/expanding-persistent-volumes[].
+Expanding CSI volumes is a Technology Preview feature only. For more information, see link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.6/html/storage/expanding-persistent-volumes[].
====
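+
+If you prefer the CLI to the console steps in the following section, the same expansion can be requested by patching the PVC directly. This is a sketch only; the PVC name suffix is a placeholder and the new size is an example:
+
+[source,terminal]
+----
+$ oc -n openshift-storage patch pvc noobaa-default-backing-store-noobaa-pvc-<suffix> \
+    --type=merge -p '{"spec":{"resources":{"requests":{"storage":"100Gi"}}}}'
+----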
-== Add Another Storage Pool
+[id="resizing-noobaa-pvc"]
+== Resizing the NooBaa PVC
+
+Use the following procedure to resize the NooBaa PVC.
+
+.Procedure
+
+. Log into the {ocp} console and select *Storage* -> *Persistent Volume Claims*.
+
+. Select the `PersistentVolumeClaim` with a name similar to `noobaa-default-backing-store-noobaa-pvc-*`.
+
+. From the *Action* menu, select *Expand PVC*.
+
+. Enter the new size of the Persistent Volume Claim and select *Expand*.
+
+After a few minutes (depending on the size of the PVC), the expanded size should be reflected in the PVC's *Capacity* field.
+
+[id="adding-another-storage-pool"]
+== Adding an additional storage pool
+
+Use the following procedure to add a second storage pool.
+
+.Procedure
+
+. Log into the {ocp} console and select *Networking* -> *Routes*. Make sure that the `openshift-storage` project is selected.
+
+. Click on the *Location* field for the `noobaa-mgmt` route.
-. Log into the OpenShift console and select `Networking` -> `Routes`. Make sure the `openshift-storage` project is selected.
-. Click on the `Location` field for the `noobaa-mgmt` Route.
. Log into the Noobaa Management Console.
-. On the main dashboard, under `Storage Resources`, select `Add Storage Resources`.
-. Select `Deploy Kubernetes Pool`
-. Enter a new pool name. Click `Next`.
-. Choose the number of Pods to manage the pool and set the size per node. Click `Next`.
-. Click `Deploy`.
-After a few minutes, the additional storage pool will be added to the Noobaa resources and available for use by {productname}.
+. On the main dashboard, select *Add Storage Resources*.
+
+. Select *Deploy Kubernetes Pool*.
+
+. Enter a new pool name and then click *Next*.
+
+. Choose the number of pods to manage the storage pool and set the size per node.
+
+. Click *Next* -> *Deploy*.
+After a few minutes, the additional storage pool will be added to the NooBaa resources and available for use by {productname}.
\ No newline at end of file
diff --git a/modules/operator-standalone-object-gateway.adoc b/modules/operator-standalone-object-gateway.adoc
index 435217e02..18f6c6c50 100644
--- a/modules/operator-standalone-object-gateway.adoc
+++ b/modules/operator-standalone-object-gateway.adoc
@@ -1,9 +1,9 @@
[[operator-standalone-object-gateway]]
= About The Standalone Object Gateway
-As part of a Red Hat Quay subscription, users are entitled to use the _Multi-Cloud Object Gateway_ (MCG) component of the Red Hat OpenShift Data Foundation Operator (formerly known as OpenShift Container Storage Operator). This gateway component allows you to provide an S3-compatible object storage interface to Quay backed by Kubernetes `PersistentVolume`-based block storage. The usage is limited to a Quay deployment managed by the Operator and to the exact specifications of the MCG instance as documented below.
+As part of a {productname} subscription, users are entitled to use the _Multi-Cloud Object Gateway_ (MCG) component of the Red Hat OpenShift Data Foundation Operator (formerly known as OpenShift Container Storage Operator). This gateway component allows you to provide an S3-compatible object storage interface to Quay backed by Kubernetes `PersistentVolume`-based block storage. The usage is limited to a Quay deployment managed by the Operator and to the exact specifications of the MCG instance as documented below.
-Since Red Hat Quay does not support local filesystem storage, users can leverage the gateway in combination with Kubernetes `PersistentVolume` storage instead, to provide a supported deployment. A `PersistentVolume` is directly mounted on the gateway instance as a backing store for object storage and any block-based `StorageClass` is supported.
+Since {productname} does not support local filesystem storage, users can leverage the gateway in combination with Kubernetes `PersistentVolume` storage instead, to provide a supported deployment. A `PersistentVolume` is directly mounted on the gateway instance as a backing store for object storage and any block-based `StorageClass` is supported.
By the nature of `PersistentVolume`, this is not a scale-out, highly available solution and does not replace a scale-out storage system like Red Hat OpenShift Data Foundation (ODF). Only a single instance of the gateway is running. If the pod running the gateway becomes unavailable due to rescheduling, updates or unplanned downtime, this will cause temporary degradation of the connected Quay instances.
@@ -11,7 +11,7 @@ By the nature of `PersistentVolume`, this is not a scale-out, highly available s
To install the ODF (formerly known as OpenShift Container Storage) Operator and configure a single instance Multi-Cloud Gateway service, follow these steps:
-. Open the OpenShift console and select Operators -> OperatorHub, then select the OpenShift Data Foundation Operator.
+. Open the OpenShift console and select Operators -> OperatorHub, then select the OpenShift Data Foundation Operator.
. Select Install. Accept all default options and select Install again.
. Within a minute, the Operator will install and create a namespace `openshift-storage`. You can confirm it has completed when the `Status` column is marked `Succeeded`.
+
@@ -39,7 +39,7 @@ spec:
      memory: 1Gi
```
+
-This will create a single instance deployment of the _Multi-cloud Object Gateway_.
+This will create a single instance deployment of the _Multi-cloud Object Gateway_.
. Apply the configuration with the following command:
+
```
@@ -96,5 +96,4 @@ This creates the backing store configuration for the gateway. All images in Quay
$ oc patch bucketclass noobaa-default-bucket-class --patch '{"spec":{"placementPolicy":{"tiers":[{"backingStores":["noobaa-pv-backing-store"]}]}}}' --type merge -n openshift-storage
```
-This concludes the setup of the _Multi-Cloud Object Gateway_ instance for Red Hat Quay. Note that this configuration cannot be run in parallel on a cluster with Red Hat OpenShift Data Foundation installed.
-
+This concludes the setup of the _Multi-Cloud Object Gateway_ instance for {productname}. Note that this configuration cannot be run in parallel on a cluster with Red Hat OpenShift Data Foundation installed.
diff --git a/modules/operator-unmanaged-hpa.adoc b/modules/operator-unmanaged-hpa.adoc
index e20035799..f6c828bc4 100644
--- a/modules/operator-unmanaged-hpa.adoc
+++ b/modules/operator-unmanaged-hpa.adoc
@@ -1,11 +1,15 @@
-[[operator-unmanaged-hpa]]
-= Disabling the Horizontal Pod Autoscaler
+:_content-type: REFERENCE
+[id="operator-unmanaged-hpa"]
+= Horizontal Pod Autoscaler
-`HorizontalPodAutoscalers` have been added to the Clair, Quay, and Mirror pods, so that they now automatically scale during load spikes.
+Horizontal Pod Autoscalers (HPAs) have been added to the `Clair`, `Quay`, and `Mirror` pods, so that they now automatically scale during load spikes.
-As HPA is configured by default to be `managed`, the number of pods for Quay, Clair and repository mirroring is set to two. This facilitates the avoidance of downtime when updating / reconfiguring Quay via the Operator or during rescheduling events.
+As HPA is configured by default to be `managed`, the number of `Clair`, `Quay`, and `Mirror` pods is set to two. This helps to avoid downtime when updating or reconfiguring {productname} through the Operator or during rescheduling events.
-If you wish to disable autoscaling or create your own `HorizontalPodAutoscaler`, simply specify the component as unmanaged in the `QuayRegistry` instance:
+[id="operator-disabling-hpa"]
+== Disabling the Horizontal Pod Autoscaler
+
+To disable autoscaling or create your own `HorizontalPodAutoscaler`, specify the component as `unmanaged` in the `QuayRegistry` instance. For example:
[source,yaml]
----
diff --git a/modules/operator-unmanaged-monitoring.adoc b/modules/operator-unmanaged-monitoring.adoc
index cb3e8b00a..071416282 100644
--- a/modules/operator-unmanaged-monitoring.adoc
+++ b/modules/operator-unmanaged-monitoring.adoc
@@ -1,10 +1,10 @@
-[[operator-unmanaged-monitoring]]
+:_content-type: REFERENCE
+[id="operator-unmanaged-monitoring"]
= Unmanaged monitoring
-If you install the Quay Operator in a single namespace, the monitoring component is automatically set to 'unmanaged'. To enable monitoring in this scenario, see the section xref:monitoring-single-namespace[].
-
-To disable monitoring explicitly:
+If you install the {productname} Operator in a single namespace, the monitoring component is automatically set to `unmanaged`. Use the following reference to explicitly disable monitoring.
+.Unmanaged monitoring
[source,yaml]
----
apiVersion: quay.redhat.com/v1
@@ -16,4 +16,6 @@ spec:
  components:
    - kind: monitoring
      managed: false
-----
\ No newline at end of file
+----
+
+To enable monitoring in this scenario, see the section xref:monitoring-single-namespace[Enabling monitoring when the {productname} Operator is installed in a single namespace].
\ No newline at end of file
diff --git a/modules/operator-unmanaged-postgres.adoc b/modules/operator-unmanaged-postgres.adoc
index 00cdac034..64fe54921 100644
--- a/modules/operator-unmanaged-postgres.adoc
+++ b/modules/operator-unmanaged-postgres.adoc
@@ -1,6 +1,10 @@
[[operator-unmanaged-postgres]]
= Using an existing Postgres database
+Requirements:
+
+If you are using an externally managed PostgreSQL database, you must manually enable the `pg_trgm` extension for a successful deployment.
+
. Create a configuration file `config.yaml` with the necessary database fields:
+
.config.yaml:
diff --git a/modules/operator-unmanaged-redis.adoc b/modules/operator-unmanaged-redis.adoc
index 8a155009e..ccb3d8afb 100644
--- a/modules/operator-unmanaged-redis.adoc
+++ b/modules/operator-unmanaged-redis.adoc
@@ -1,31 +1,35 @@
-[[operator-unmanaged-redis]]
+[id="operator-unmanaged-redis"]
= Using external Redis
+Use the following procedure to use an external Redis database.
+
If you wish to use an external Redis database, set the component as unmanaged in the `QuayRegistry` instance:
+.Procedure
-. Create a configuration file `config.yaml` with the necessary redis fields:
+. Create a `config.yaml` file using the following Redis fields:
+
[source,yaml]
----
BUILDLOGS_REDIS:
  host: quay-server.example.com
-  password: strongpassword
  port: 6379
+  ssl: false
USER_EVENTS_REDIS:
  host: quay-server.example.com
-  password: strongpassword
  port: 6379
+  ssl: false
----
-. Create a Secret using the configuration file
+. Enter the following command to create a secret using the configuration file:
+
-```
+[source,terminal]
+----
$ oc create secret generic --from-file config.yaml=./config.yaml config-bundle-secret
-```
+----
-. Create a QuayRegistry YAML file `quayregistry.yaml` which marks redis component as unmanaged and references the created Secret:
+. Create a `quayregistry.yaml` file that sets the Redis component to `unmanaged` and references the created secret:
+
[source,yaml]
----
@@ -41,5 +45,4 @@ spec:
      managed: false
----
-. Deploy the registry
-
+. Deploy the {productname} registry.
\ No newline at end of file
diff --git a/modules/operator-unmanaged-route.adoc b/modules/operator-unmanaged-route.adoc
index 14edf5bdb..d16ba67cf 100644
--- a/modules/operator-unmanaged-route.adoc
+++ b/modules/operator-unmanaged-route.adoc
@@ -1,9 +1,12 @@
-[[operator-unmanaged-route]]
+:_content-type: PROCEDURE
+[id="operator-unmanaged-route"]
= Disabling Route Component
-To prevent the Operator from creating a `Route`:
+Use the following procedure to prevent the {productname} Operator from creating a route.
-. Mark the component as unmanaged in the `QuayRegistry`:
+.Procedure
+
+. Set the component as `unmanaged` in the `quayregistry.yaml` file:
+
[source,yaml]
----
@@ -18,9 +21,8 @@ spec:
      managed: false
----
-. Specify that you want Quay to handle TLS in the configuration, by editing the `config.yaml` file:
+. Edit the `config.yaml` file to specify that {productname} handles SSL/TLS. For example:
+
-.config.yaml
[source,yaml]
----
...
@@ -32,7 +34,7 @@ PREFERRED_URL_SCHEME: https
...
----
+
-If you do not configure the unmanaged Route correctly, you will see an error similar to the following:
+If you do not configure the `unmanaged` route correctly, an error similar to the following is returned:
+
[source,json]
----
@@ -50,8 +52,7 @@ If you do not configure the unmanaged Route correctly, you will see an error sim
}
----
-
[NOTE]
====
-Disabling the default `Route` means you are now responsible for creating a `Route`, `Service`, or `Ingress` in order to access the Quay instance and that whatever DNS you use must match the `SERVER_HOSTNAME` in the Quay config.
+Disabling the default route means you are now responsible for creating a `Route`, `Service`, or `Ingress` in order to access the {productname} instance. Additionally, whatever DNS you use must match the `SERVER_HOSTNAME` in the {productname} config.
====
diff --git a/modules/operator-unmanaged-storage-noobaa.adoc b/modules/operator-unmanaged-storage-noobaa.adoc
index 1ca1e5754..4902feb6b 100644
--- a/modules/operator-unmanaged-storage-noobaa.adoc
+++ b/modules/operator-unmanaged-storage-noobaa.adoc
@@ -5,6 +5,7 @@
. Retrieve the Object Bucket Claim Data details including the Access Key, Bucket Name, Endpoint (hostname) and Secret Key.
. Create a `config.yaml` configuration file, using the information for the Object Bucket Claim:
+
+[source,yaml]
----
DISTRIBUTED_STORAGE_CONFIG:
  default:
diff --git a/modules/operator-unmanaged-storage.adoc b/modules/operator-unmanaged-storage.adoc
index 04619dcf4..6ad6dd178 100644
--- a/modules/operator-unmanaged-storage.adoc
+++ b/modules/operator-unmanaged-storage.adoc
@@ -1,4 +1,126 @@
[[operator-unmanaged-storage]]
= Unmanaged storage
-Some configuration examples for unmanaged storage are provided in this section for convenience. See the {productname} configuration guide for full details for setting up object storage.
\ No newline at end of file +Some configuration examples for unmanaged storage are provided in this section for convenience. See the {productname} configuration guide for full details on setting up object storage. + +== AWS S3 storage + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + s3Storage: + - S3Storage + - host: s3.us-east-2.amazonaws.com + s3_access_key: ABCDEFGHIJKLMN + s3_secret_key: OL3ABCDEFGHIJKLMN + s3_bucket: quay_bucket + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - s3Storage +---- + +== Google Cloud storage + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + googleCloudStorage: + - GoogleCloudStorage + - access_key: GOOGQIMFB3ABCDEFGHIJKLMN + bucket_name: quay-bucket + secret_key: FhDAYe2HeuAKfvZCAGyOioNaaRABCDEFGHIJKLMN + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - googleCloudStorage +---- + +== Azure storage + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + azureStorage: + - AzureStorage + - azure_account_name: azure_account_name_here + azure_container: azure_container_here + storage_path: /datastorage/registry + azure_account_key: azure_account_key_here + sas_token: some/path/ + endpoint_url: https://[account-name].blob.core.usgovcloudapi.net <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - azureStorage +---- +<1> The `endpoint_url` parameter for Azure storage is optional and can be used with Microsoft Azure Government (MAG) endpoints. If left blank, the `endpoint_url` will connect to the normal Azure region. ++ +As of {productname} 3.7, you must use the Primary endpoint of your MAG Blob service. Using the Secondary endpoint of your MAG Blob service will result in the following error: `AuthenticationErrorDetail:Cannot find the claimed account when trying to GetProperties for the account whusc8-secondary`. + +== Ceph/RadosGW storage + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + radosGWStorage: #storage config name + - RadosGWStorage #actual driver + - access_key: access_key_here #parameters + secret_key: secret_key_here + bucket_name: bucket_name_here + hostname: hostname_here + is_secure: 'true' + port: '443' + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: #must contain name of the storage config + - radosGWStorage +---- + +== Swift storage + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + swiftStorage: + - SwiftStorage + - swift_user: swift_user_here + swift_password: swift_password_here + swift_container: swift_container_here + auth_url: https://example.org/swift/v1/quay + auth_version: 1 + ca_cert_path: /conf/stack/swift.cert + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - swiftStorage +---- + +== NooBaa unmanaged storage + +Use the following procedure to deploy NooBaa as your unmanaged storage configuration. + +.Procedure + +. Create a NooBaa Object Bucket Claim in the {ocp} console by navigating to *Storage* -> *Object Bucket Claims*. + +. Retrieve the Object Bucket Claim Data details, including the Access Key, Bucket Name, Endpoint (hostname), and Secret Key. + +. 
Create a `config.yaml` configuration file using the information for the Object Bucket Claim: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - RHOCSStorage + - access_key: WmrXtSGk8B3nABCDEFGH + bucket_name: my-noobaa-bucket-claim-8b844191-dc6c-444e-9ea4-87ece0abcdef + hostname: s3.openshift-storage.svc.cluster.local + is_secure: true + port: "443" + secret_key: X9P5SDGJtmSuHFCMSLMbdNCMfUABCDEFGH+C5QD + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- + +For more information about configuring an Object Bucket Claim, see link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.8/html-single/managing_hybrid_and_multicloud_resources/index#object-bucket-claim[Object Bucket Claim]. \ No newline at end of file diff --git a/modules/operator-upgrade.adoc b/modules/operator-upgrade.adoc index 01517e3dd..80041a7fe 100644 --- a/modules/operator-upgrade.adoc +++ b/modules/operator-upgrade.adoc @@ -1,5 +1,5 @@ [[operator-upgrade]] -= Upgrading the Quay Operator Overview += Upgrading the Quay Operator Overview The Quay Operator follows a _synchronized versioning_ scheme, which means that each version of the Operator is tied to the version of Quay and the components that it manages. There is no field on the `QuayRegistry` custom resource which sets the version of Quay to deploy; the Operator only knows how to deploy a single version of all components. This scheme was chosen to ensure that all components work well together and to reduce the complexity of the Operator needing to know how to manage the lifecycles of many different versions of Quay on Kubernetes. @@ -12,25 +12,33 @@ When the Quay Operator is installed via Operator Lifecycle Manager, it may be configured to support automatic or manual upgrades. This option is shown on the *Operator Hub* page for the Quay Operator during installation. It can also be found in the Quay Operator `Subscription` object via the `approvalStrategy` field. Choosing `Automatic` means that your Quay Operator will automatically be upgraded whenever a new Operator version is released. If this is not desirable, then the `Manual` approval strategy should be selected. ==== - == Upgrading the Quay Operator The standard approach for upgrading installed Operators on OpenShift is documented at link:https://docs.openshift.com/container-platform/4.7/operators/admin/olm-upgrading-operators.html[Upgrading installed Operators]. -[NOTE] -==== -In general, {productname} only supports upgrading from one minor version to the next, for example, 3.4 -> 3.5. However, for 3.6, multiple upgrade paths are supported: +In general, {productname} supports upgrades from a prior (N-1) minor version only. For example, upgrading directly from {productname} 3.0.5 to the latest version of 3.5 is not supported. Instead, users would have to upgrade as follows: -* 3.3.z -> 3.6 -* 3.4.z -> 3.6 -* 3.5.z -> 3.6 -==== +. 3.0.5 -> 3.1.3 +. 3.1.3 -> 3.2.2 +. 3.2.2 -> 3.3.4 +. 3.3.4 -> 3.4.z +. 3.4.z -> 3.5.z + +This is required to ensure that any necessary database migrations are done correctly and in the right order during the upgrade. + +In some cases, {productname} supports direct, single-step upgrades from prior (N-2, N-3) minor versions. This exception to the normal prior-minor-version-only upgrade path simplifies the upgrade procedure for customers on older releases. 
The following upgrade paths are supported: -For users on standalone deployments of Quay wanting to upgrade to 3.6, see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#standalone_upgrade[Standalone upgrade] guide. +* 3.3.z -> 3.6.z +* 3.4.z -> 3.6.z +* 3.4.z -> 3.7.z +* 3.5.z -> 3.7.z +* 3.7.z -> 3.8.z + +For users on standalone deployments of Quay wanting to upgrade to 3.8, see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#standalone_upgrade[Standalone upgrade] guide. -=== Upgrading Quay -To update Quay from one minor version to the next, for example, 3.4 -> 3.5, you need to change the update channel for the Quay Operator. + +=== Upgrading Quay +To update Quay from one minor version to the next, for example, 3.4 -> 3.5, you need to change the update channel for the Quay Operator. For `z` stream upgrades, for example, 3.4.2 -> 3.4.3, updates are released in the major-minor channel that the user initially selected during install. The procedure to perform a `z` stream upgrade depends on the `approvalStrategy` as outlined above. If the approval strategy is set to `Automatic`, the Quay Operator will upgrade automatically to the newest `z` stream. This results in automatic, rolling Quay updates to newer `z` streams with little to no downtime. Otherwise, the update must be manually approved before installation can begin. @@ -41,7 +49,7 @@ For `z` stream upgrades, for example, 3.4.2 -> 3.4.3, updates are released in th * Previously, when running a 3.3.z version of {productname} with edge routing enabled, users were unable to upgrade to 3.4.z versions of {productname}. This has been resolved with the release of {productname} 3.6. -* When upgrading from 3.3.z to 3.6, if `tls.termination` is set to `none` in your {productname} 3.3.z deployment, it will change to HTTPS with TLS edge termination and use the default cluster wildcard certificate. For example: +* When upgrading from 3.3.z to 3.6, if `tls.termination` is set to `none` in your {productname} 3.3.z deployment, it will change to HTTPS with TLS edge termination and use the default cluster wildcard certificate. For example: + [source,yaml] ---- @@ -75,10 +83,31 @@ If possible, you should regenerate your TLS certificates with the correct hostna The `GODEBUG=x509ignoreCN=0` flag enables the legacy behavior of treating the CommonName field on X.509 certificates as a host name when no SANs are present. However, this workaround is not recommended, as it will not persist across a redeployment. -==== Configuring Clair v4 when upgrading from 3.3.z or 3.4.z to 3.6 using the Quay Operator -To set up Clair v4 on a new {productname} deployment on OpenShift, it is highly recommended to use the Quay Operator. By default, the Quay Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair security scanning automatically. +==== Configuring Clair v4 when upgrading from 3.3.z or 3.4.z to 3.6 using the Quay Operator +To set up Clair v4 on a new {productname} deployment on OpenShift, it is highly recommended to use the Quay Operator. By default, the Quay Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically. 
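+ +The Clair component itself is toggled through the `QuayRegistry` resource in the same way as the other components shown in this guide. The following is a minimal sketch that leaves Clair managed by the Operator; the `clair` component name is assumed from the Operator's component list: + +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry +spec: + components: + - kind: clair + managed: true +----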
+ +For instructions on setting up Clair v4 on OpenShift, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-openshift[Setting Up Clair on a {productname} OpenShift deployment]. + +=== Swift configuration when upgrading from 3.3.z to 3.6 -For instructions on setting up Clair v4 on OpenShift, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-openshift[Setting Up Clair on a Red Hat Quay OpenShift deployment]. +When upgrading from {productname} 3.3.z to 3.6.z, some users might receive the following error: `Switch auth v3 requires tenant_id (string) in os_options`. As a workaround, you can manually update your `DISTRIBUTED_STORAGE_CONFIG` to add the `os_options` and `tenant_id` parameters: + +[source,yaml] +---- + DISTRIBUTED_STORAGE_CONFIG: + brscale: + - SwiftStorage + - auth_url: http://****/v3 + auth_version: "3" + os_options: + tenant_id: **** + project_name: ocp-base + user_domain_name: Default + storage_path: /datastorage/registry + swift_container: ocp-svc-quay-ha + swift_password: ***** + swift_user: ***** +---- === Changing the update channel for an Operator @@ -96,7 +125,6 @@ The list of Installed Operators provides a high-level summary of the current Qua image:installed-operators-list.png[Installed Operators] - == Upgrading a QuayRegistry When the Quay Operator starts, it immediately looks for any `QuayRegistries` it can find in the namespace(s) it is configured to watch. When it finds one, the following logic is used: @@ -105,22 +133,6 @@ When the Quay Operator starts, it immediately looks for any `QuayRegistries` it * If `status.currentVersion` equals the Operator version, reconcile as normal. * If `status.currentVersion` does not equal the Operator version, check if it can be upgraded. If it can, perform upgrade tasks and set the `status.currentVersion` to the Operator's version once complete. If it cannot be upgraded, return an error and leave the `QuayRegistry` and its deployed Kubernetes objects alone. -== Enabling features in Quay 3.6 - -=== Console monitoring and alerting - -The support for monitoring Quay 3.6 in the OpenShift console requires that the Operator is installed in all namespaces. If you previously installed the Operator in a specific namespace, delete the Operator itself and reinstall it for all namespaces once the upgrade has taken place. - -=== OCI and Helm support - -Support for Helm and some OCI artifacts is now enabled by default in {productname} {producty}. If you want to explicitly enable the feature, for example, if you are upgrading from a version where it is not enabled by default, you need to reconfigure your Quay deployment to enable the use of OCI artifacts using the following properties: - -[source,yaml] ----- -FEATURE_GENERAL_OCI_SUPPORT: true ----- - - == Upgrading a QuayEcosystem Upgrades are supported from previous versions of the Operator which used the `QuayEcosystem` API for a limited set of configurations. To ensure that migrations do not happen unexpectedly, a special label needs to be applied to the `QuayEcosystem` for it to be migrated. A new `QuayRegistry` will be created for the Operator to manage, but the old `QuayEcosystem` will remain until manually deleted to ensure that you can roll back and still access Quay in case anything goes wrong. 
To migrate an existing `QuayEcosystem` to a new `QuayRegistry`, follow these steps: diff --git a/modules/other-oci-artifacts-with-quay.adoc b/modules/other-oci-artifacts-with-quay.adoc index 2b3db82b0..7fe7af55e 100644 --- a/modules/other-oci-artifacts-with-quay.adoc +++ b/modules/other-oci-artifacts-with-quay.adoc @@ -1,7 +1,7 @@ [[other-oci-artifacts-with-quay]] = Adding other OCI media types to Quay -Helm, cosign, and ztsd compression scheme artifacts are built into {productname} {producty} by default. For any other OCI media type that is not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's config.yaml using the following format: +Helm, cosign, and zstd compression scheme artifacts are built into {productname} 3.6 by default. For any other OCI media type that is not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's config.yaml using the following format: .... ALLOWED_OCI_ARTIFACT_TYPES: diff --git a/modules/proc_container-security-operator-setup.adoc b/modules/proc_container-security-operator-setup.adoc index f37a55130..4af70919f 100644 --- a/modules/proc_container-security-operator-setup.adoc +++ b/modules/proc_container-security-operator-setup.adoc @@ -1,17 +1,21 @@ -[[container-security-operator-setup]] -= Scan pod images with the Container Security Operator +:_content-type: PROCEDURE +[id="container-security-operator-setup"] += Scanning pod images with the Container Security Operator -Using the link:https://operatorhub.io/operator/container-security-operator[Container Security Operator], -(CSO) you can scan container images associated -with active pods, running on OpenShift (4.2 or later) and other Kubernetes -platforms, for known vulnerabilities. The CSO: +The link:https://operatorhub.io/operator/container-security-operator[Container Security Operator] (CSO) is an addon for the Clair security scanner available on {ocp} and other Kubernetes platforms. With the CSO, users can scan container images associated with active pods for known vulnerabilities. -* Watches containers associated with pods on all or specified namespaces -* Queries the container registry where the containers came from for vulnerability information provided an image’s registry supports image scanning (such as a Quay registry with Clair scanning) -* Exposes vulnerabilities via the ImageManifestVuln object in the Kubernetes API +[NOTE] +==== +The CSO does not work without {productname} and Clair. +==== + +The CSO performs the following actions: + +* Watches containers associated with pods on either specified or all namespaces. -Using the instructions here, the CSO is installed in the `marketplace-operators` namespace, -so it is available to all namespaces on your OpenShift cluster. +* Queries the container registry where the containers came from for vulnerability information (provided that an image's registry supports image scanning, such as a {productname} registry with Clair scanning). + +* Exposes vulnerabilities via the `ImageManifestVuln` object in the Kubernetes API. [NOTE] ==== @@ -19,9 +23,15 @@ To see instructions on installing the CSO on Kubernetes, select the Install button from the link:https://operatorhub.io/operator/container-security-operator[Container Security OperatorHub.io] page. ====
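+ +As an illustration of the `ImageManifestVuln` objects mentioned above, after the CSO is installed (see the following procedure) you can list the vulnerabilities it has exposed. The following is a sketch; the exact output depends on your cluster: + +[source,terminal] +---- +$ oc get imagemanifestvuln --all-namespaces +----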
-== Run the CSO in OpenShift +[id="running-cso-openshift"] +== Downloading and running the Container Security Operator in {ocp} + +Use the following procedure to download and run the Container Security Operator in {ocp}. -To start using the CSO in OpenShift, do the following: +[NOTE] +==== +In the following procedure, the CSO is installed in the `marketplace-operators` namespace. This allows the CSO to be used in all namespaces of your {ocp} cluster. +==== . Go to Operators -> OperatorHub (select Security) to see the available `Container Security` Operator. diff --git a/modules/proc_creating-ocp-secret-for-oauth-token.adoc b/modules/proc_creating-ocp-secret-for-oauth-token.adoc new file mode 100644 index 000000000..a5c8e2b8b --- /dev/null +++ b/modules/proc_creating-ocp-secret-for-oauth-token.adoc @@ -0,0 +1,21 @@ +:_content-type: PROCEDURE +[id="creating-ocp-secret-for-oauth-token"] += Creating an {ocp} secret for the OAuth token + +In this procedure, you will add the previously obtained access token to communicate with your {productname} deployment. The access token will be stored within {ocp} as a secret. + +.Prerequisites + +* You have set up {productname} and obtained an access token. +* You have deployed the {qbo} on {ocp}. +* An {ocp} 4.6 or greater environment for which you have cluster administrator permissions. +* You have installed the OpenShift CLI (`oc`). + +.Procedure + +* Create a secret that contains the access token in the `openshift-operators` namespace: ++ +[source,terminal] +---- +$ oc create secret -n openshift-operators generic --from-literal=token=<access_token> +---- diff --git a/modules/proc_creating-quay-integration-cr.adoc b/modules/proc_creating-quay-integration-cr.adoc new file mode 100644 index 000000000..fa356cf9b --- /dev/null +++ b/modules/proc_creating-quay-integration-cr.adoc @@ -0,0 +1,76 @@ +:_content-type: PROCEDURE +[id="creating-quay-integration-cr"] += Creating the QuayIntegration custom resource + +In this procedure, you will create a `QuayIntegration` custom resource, which can be completed from either the web console or from the command line. + +.Prerequisites + +* You have set up {productname} and obtained an access token. +* You have deployed the {qbo} on {ocp}. +* An {ocp} 4.6 or greater environment for which you have cluster administrator permissions. +* Optional: You have installed the OpenShift CLI (`oc`). + +== Optional: Creating the QuayIntegration custom resource using the CLI + +Follow this procedure to create the `QuayIntegration` custom resource using the command line. + +.Procedure + +. Create a `quay-integration.yaml` file: ++ +[source,terminal] +---- +$ touch quay-integration.yaml +---- + +. Use the following configuration for a minimal deployment of the `QuayIntegration` custom resource: ++ +[source,yaml] +---- + apiVersion: quay.redhat.com/v1 + kind: QuayIntegration + metadata: + name: example-quayintegration + spec: + clusterID: openshift <1> + credentialsSecret: + namespace: openshift-operators + name: quay-integration <2> + quayHostname: https://QUAY_URL <3> + insecureRegistry: false <4> +---- +<1> The `clusterID` value should be unique across the entire ecosystem. This value is required and defaults to `openshift`. +<2> The `credentialsSecret` property refers to the namespace and name of the secret containing the token that was previously created. +<3> Replace `QUAY_URL` with the hostname of your {productname} instance. +<4> If {productname} is using self-signed certificates, set the property to `insecureRegistry: true`. ++ +For a list of all configuration fields, see "QuayIntegration configuration fields". + +. Create the `QuayIntegration` custom resource: ++ +[source,terminal] +---- +$ oc create -f quay-integration.yaml +----
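+ +Optionally, you can then verify that the custom resource exists; a quick check, using the resource name from the example above: + +[source,terminal] +---- +$ oc get quayintegration example-quayintegration +----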
+ +== Optional: Creating the QuayIntegration custom resource using the web console + +Follow this procedure to create the `QuayIntegration` custom resource using the web console. + +.Procedure + +. Open the *Administrator* perspective of the web console and navigate to *Operators* -> *Installed Operators*. + +. Click *Red Hat {qbo}*. + +. On the *Details* page of the {qbo}, click *Create Instance* on the *Quay Integration* API card. + +. On the *Create QuayIntegration* page, enter the following required information in either *Form view* or *YAML view*: ++ +* *Name*: The name that will refer to the `QuayIntegration` custom resource object. +* *Cluster ID*: The ID associated with this cluster. This value should be unique across the entire ecosystem. Defaults to `openshift` if left unspecified. +* *Credentials secret*: Refers to the namespace and name of the secret containing the token that was previously created. +* *Quay hostname*: The hostname of the Quay registry. ++ +For a list of all configuration fields, see "QuayIntegration configuration fields". + +After the `QuayIntegration` custom resource is created, your {ocp} cluster will be linked to your {productname} instance. Organizations are created within your {productname} registry for the related namespaces in the {ocp} environment. diff --git a/modules/proc_deploy_quay_add.adoc b/modules/proc_deploy_quay_add.adoc index 6f17ecf84..ddbedb6ff 100644 --- a/modules/proc_deploy_quay_add.adoc +++ b/modules/proc_deploy_quay_add.adoc @@ -14,7 +14,7 @@ three or more nodes (for example, quay01, quay02, and quay03). ==== The resulting {productname} service will listen on regular port 8080 and SSL port 8443. This is different from previous releases of {productname}, which listened on -standard ports 80 and 443, respectively. +standard ports 80 and 443, respectively. In this document, we map 8080 and 8443 to standard ports 80 and 443 on the host, respectively. Throughout the rest of this document, we assume you have mapped the ports in this way. ==== @@ -53,17 +53,17 @@ the startup process. ==== + [subs="verbatim,attributes"] -``` +---- # sudo podman run --restart=always -p 443:8443 -p 80:8080 \ --sysctl net.core.somaxconn=4096 \ --privileged=true \ -v /mnt/quay/config:/conf/stack:Z \ -v /mnt/quay/storage:/datastorage:Z \ -d {productrepo}/{quayimage}:{productminv} -``` +---- . **Open browser to UI**: Once the `Quay` container has started, go to your web browser and -open the URL, to the node running the `Quay` container. +open the URL to the node running the `Quay` container. . **Log into {productname}**: Using the superuser account you created during configuration, log in and make sure {productname} is working properly. @@ -78,7 +78,7 @@ Clair images scanning and Repository Mirroring, continue on to the next section.
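+ +Optionally, before adding Clair image scanning and repository mirroring, you can confirm that each node is serving registry traffic. The following is one way to check, assuming the 80 and 443 port mappings described earlier and the {productname} instance health endpoint: + +[source,terminal] +---- +$ curl -k https://quay01/health/instance +----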
== Add Clair image scanning to {productname} Setting up and deploying Clair image scanning for your -{productname} deployment is described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-v4[Clair Security Scanning] +{productname} deployment is described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-v4[Clair Security Scanning]. [[add-repo-mirroring]] == Add repository mirroring {productname} @@ -93,7 +93,7 @@ To add the repository mirroring feature to your {productname} cluster: `repomirror` option. * Select "Enable Repository Mirroring in the {productname} Setup tool. * Log into your {productname} Web UI and begin creating mirrored repositories -as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index[Repository Mirroring in Red Hat Quay]. +as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index[Repository Mirroring in {productname}]. The following procedure assumes you already have a running {productname} cluster on an OpenShift platform, with the {productname} Setup @@ -105,12 +105,12 @@ that is currently stored in `/root/ca.crt`. If not, then remove the line that ad `/root/ca.crt` to the container: + [subs="verbatim,attributes"] -``` +---- $ sudo podman run -d --name mirroring-worker \ -v /mnt/quay/config:/conf/stack:Z \ -v /root/ca.crt:/etc/pki/ca-trust/source/anchors/ca.crt \ {productrepo}/{quayimage}:{productminv} repomirror -``` +---- . **Log into config tool**: Log into the {productname} Setup Web UI (config tool). . **Enable repository mirroring**: Scroll down the Repository Mirroring section and select the Enable Repository Mirroring check box, as shown here: diff --git a/modules/proc_deploy_quay_common_superuser.adoc b/modules/proc_deploy_quay_common_superuser.adoc index 71349fa30..b7b67eebe 100644 --- a/modules/proc_deploy_quay_common_superuser.adoc +++ b/modules/proc_deploy_quay_common_superuser.adoc @@ -11,20 +11,20 @@ A `superuser` is a Quay user account that has extended privileges, including the == Adding a superuser to Quay using the UI -This section covers how to add a superuser using the Quay UI. To add a superuser using the command line interface, see the following section. +This section covers how to add a superuser using the Quay UI. To add a superuser using the command line interface, see the following section. . Start the `Quay` container in configuration mode, loading the existing configuration as a volume: + [subs="verbatim,attributes"] .... -$ sudo podman run --rm -it --name quay_config \ +$ sudo podman run --rm -it --name quay_config \ -p 8080:8080 \ -p 443:8443 \ -v $QUAY/config:/conf/stack:Z \ {productrepo}/{quayimage}:{productminv} config secret .... -. Under the `Access Settings` section of the UI, enter the name of the user (in this instance, `quayadmin`) in the `Super Users` field and click `Add`. +. Under the `Access Settings` section of the UI, enter the name of the user (in this instance, `quayadmin`) in the `Super Users` field and click `Add`. . Validate and download the `configuration` file and then terminate the `Quay` container that is running in config mode.
Extract the `config.yaml` file to the configuration directory and restart the `Quay` container in registry mode: + @@ -37,15 +37,14 @@ $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} ``` - + == Editing the config.yaml file to add a superuser You can also add a superuser by editing the `config.yaml` file directly. The list of superuser accounts is stored as an array in the field `SUPER_USERS`. -* Stop the container registry if it is running, and add the `SUPER_USERS` array to the `config.yaml` file: +* Stop the container registry if it is running, and add the `SUPER_USERS` array to the `config.yaml` file: + -.$QUAY/config/config.yaml [source,yaml] ---- SERVER_HOSTNAME: quay-server.example.com @@ -57,33 +56,33 @@ SUPER_USERS: == Accessing the superuser admin panel -. Restart the Quay registry: +. Restart the Quay registry: + [subs="verbatim,attributes"] -``` +---- $ sudo podman rm -f quay $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ --name=quay \ -v $QUAY/config:/conf/stack:Z \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} -``` +---- . Access the Super User Admin Panel by clicking on the current user's name or avatar in the top right-hand corner of the UI. If the user has been added as a superuser, an extra item is presented in the drop-down list called Super User Admin Panel. + image:super-user-admin-panel.png[Super User Admin Panel] === Creating a globally visible user message -Using the Superuser Admin Panel, you can create `Normal`, `Warning`, or `Error` messages for your organization. +Using the Superuser Admin Panel, you can create `Normal`, `Warning`, or `Error` messages for your organization. -. Click your user name in the top right-hand corner of the UI. Select `Super User Admin Panel`. +. Click your user name in the top right-hand corner of the UI. Select `Super User Admin Panel`. -. On the {productname} Management page, click `Globally visible user messages` on the left hand pane. +. On the {productname} Management page, click `Globally visible user messages` on the left hand pane. -. Click `Create Message` to show a drop-down menu containing `Normal`, `Warning`, and `Error` message types: +. Click `Create Message` to show a drop-down menu containing `Normal`, `Warning`, and `Error` message types: + image:create-new-message.png[Creating a new message] -. Enter a message by selecting `Click to set message`, then click `Create Message`. +. Enter a message by selecting `Click to set message`, then click `Create Message`. -Messages can be deleted by clicking `Options` and then `Delete Message`. +Messages can be deleted by clicking `Options` and then `Delete Message`. 
diff --git a/modules/proc_deploy_quay_ha_ceph.adoc b/modules/proc_deploy_quay_ha_ceph.adoc index 2ec1a50c4..331f340e6 100644 --- a/modules/proc_deploy_quay_ha_ceph.adoc +++ b/modules/proc_deploy_quay_ha_ceph.adoc @@ -1,5 +1,5 @@ == Set Up Ceph -For this Red Hat Quay configuration, we create a three-node Ceph cluster, with +For this {productname} configuration, we create a three-node Ceph cluster, with several other supporting nodes, as follows: * ceph01, ceph02, and ceph03 - Ceph Monitor, Ceph Manager and Ceph OSD nodes diff --git a/modules/proc_deploy_quay_ha_lbdb.adoc b/modules/proc_deploy_quay_ha_lbdb.adoc index be721faff..f968a469c 100644 --- a/modules/proc_deploy_quay_ha_lbdb.adoc +++ b/modules/proc_deploy_quay_ha_lbdb.adoc @@ -1,37 +1,40 @@ -== Set up Load Balancer and Database +:_content-type: PROCEDURE +[id="setting-up-load-balancer-database"] +== Setting up the HAProxy load balancer and the PostgreSQL database -On the first two systems (q01 and q02), install the haproxy load balancer and postgresql database. Haproxy will be configured as the access point and load balancer for the following services running on other systems: +Use the following procedure to set up the HAProxy load balancer and the PostgreSQL database. +.Prerequisites + +* You have installed the Podman or Docker CLI. + +.Procedure + +. On the first two systems, `q01` and `q02`, install the HAProxy load balancer and the PostgreSQL database. HAProxy will be configured as the access point and load balancer for the following services running on other systems: ++ * {productname} (ports 80 and 443 on B systems) * Redis (port 6379 on B systems) * RADOS (port 7480 on C systems) -Because the services on the two systems run as containers, you will use `podman`, if it is installed. Alternatively, you could use the equivalent `docker` commands. - -[NOTE] -==== -For more information on using `podman` and restarting containers, see the section "Using podman" earlier in this document. -==== - -Here is how to set up the A systems: //. **Install and start docker service**: Install, start, and enable the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7[docker service]. -. **Open ports for haproxy service**: Open all haproxy ports in SELinux and selected haproxy ports in the firewall: - + -``` +[source,terminal] +---- # setsebool -P haproxy_connect_any=on # firewall-cmd --permanent --zone=public --add-port=6379/tcp --add-port=7480/tcp success # firewall-cmd --reload success -``` +---- + //. **Set up link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/load_balancer_administration/index#install_haproxy_example1[haproxy service]**: Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis, and Ceph RADOS services. Here are examples of defaults and added frontend and backend settings: -. **Set up haproxy service**: Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis, and Ceph RADOS services. Here are examples of defaults and added frontend and backend settings: +. Configure the `/etc/haproxy/haproxy.cfg` file to point to the systems and ports providing the {productname}, Redis, and Ceph RADOS services. 
The following are examples of defaults and added frontend and backend settings: + -``` +---- #--------------------------------------------------------------------- # common defaults that all the 'listen' and 'backend' sections will # use if not designated in their block @@ -82,75 +85,138 @@ backend be_rdgw server ceph02 ceph02:7480 check server ceph03 ceph03:7480 check backend be_redis -server quay01 quay01:6380 check inter 1s -server quay02 quay02:6380 check inter 1s -server quay03 quay03:6380 check inter 1s -``` - +server quay01 quay01:6379 check inter 1s +server quay02 quay02:6379 check inter 1s +server quay03 quay03:6379 check inter 1s +---- + -Once the new haproxy.cfg file is in place, restart the haproxy service. +After the new `haproxy.cfg` file is in place, restart the HAProxy service by entering the following command: + -``` +[source,terminal] +---- # systemctl restart haproxy -``` +---- -. **Install / Deploy a Database**: Install, enable and start the link:https://access.redhat.com/containers/?tab=overview#/registry.access.redhat.com/rhel8/postgresql-10)[PostgreSQL] database container. The following commands will: - -+ -* Start the PostgreSQL database with the user, password and database all set. Data from the container will be stored on the host system in the `/var/lib/pgsql/data` directory. -+ -* List available extensions. +. Create a directory for the PostgreSQL database by entering the following command: + -* Create the pg_trgm extension. -+ -* Confirm the extension is installed -+ -``` +[source,terminal] +---- $ mkdir -p /var/lib/pgsql/data +---- + +. Set the following permissions for the `/var/lib/pgsql/data` directory: ++ +[source,terminal] +---- $ chmod 777 /var/lib/pgsql/data +---- + +. Enter the following command to start the PostgreSQL database: ++ +[source,terminal] +---- $ sudo podman run -d --name postgresql_database \ -v /var/lib/pgsql/data:/var/lib/pgsql/data:Z \ -e POSTGRESQL_USER=quayuser -e POSTGRESQL_PASSWORD=quaypass \ -e POSTGRESQL_DATABASE=quaydb -p 5432:5432 \ - registry.redhat.io/rhel8/postgresql-10:1 + registry.redhat.io/rhel8/postgresql-13:1-109 +---- ++ +[NOTE] +==== +Data from the container will be stored on the host system in the `/var/lib/pgsql/data` directory. +==== +. List the available extensions by entering the following command: ++ +[source,terminal] +---- -$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_available_extensions" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_available_extensions" | psql' +---- ++ +.Example output ++ +[source,terminal] +---- name | default_version | installed_version | comment -----------+-----------------+-------------------+---------------------------------------- adminpack | 1.0 | | administrative functions for PostgreSQL ... +---- +. Create the `pg_trgm` extension by entering the following command: ++ +[source,terminal] +---- -$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | /opt/rh/rh-postgresql96/root/usr/bin/psql -d quaydb' +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | psql -d quaydb' +---- +. 
Confirm that the `pg_trgm` extension has been created by entering the following command: ++ +[source,terminal] +---- -$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_extension" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_extension" | psql' +---- ++ +.Example output ++ +[source,terminal] +---- extname | extowner | extnamespace | extrelocatable | extversion | extconfig | extcondition ---------+----------+--------------+----------------+------------+-----------+-------------- plpgsql | 10 | 11 | f | 1.0 | | pg_trgm | 10 | 2200 | t | 1.3 | | (2 rows) +---- +. Alter the privileges of the Postgres user `quayuser` and grant them the `superuser` role to give the user unrestricted access to the database: ++ +[source,terminal] +---- -$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "ALTER USER quayuser WITH SUPERUSER;" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "ALTER USER quayuser WITH SUPERUSER;" | psql' +---- ++ +.Example output ++ +[source,terminal] +---- ALTER ROLE +---- -``` - -. **Open the firewall**: If you have a firewalld service active on your system, run the following commands to make the PostgreSQL port available through the firewall: - +. If you have a firewalld service active on your system, run the following commands to make the PostgreSQL port available through the firewall: + -``` +[source,terminal] +---- # firewall-cmd --permanent --zone=trusted --add-port=5432/tcp -success +---- ++ +[source,terminal] +---- # firewall-cmd --reload -success +---- +. Optional: If you do not have the `postgresql` package installed, install it by entering the following command: + -``` +[source,terminal] +---- # yum install postgresql -y +---- +. Use the `psql` command to test connectivity to the PostgreSQL database. ++ +[NOTE] +==== +To verify that you can access the service remotely, run the following command on a remote system. +==== ++ +[source,terminal] +---- # psql -h localhost quaydb quayuser +---- ++ +.Example output ++ +[source,terminal] +---- Password for user test: psql (9.2.23, server 9.6.5) WARNING: psql version 9.2, server version 9.6. @@ -158,4 +224,4 @@ WARNING: psql version 9.2, server version 9.6. Type "help" for help. test=> \q -``` +---- diff --git a/modules/proc_deploy_quay_poc_conf.adoc b/modules/proc_deploy_quay_poc_conf.adoc index 9dd98941c..b17fa607f 100644 --- a/modules/proc_deploy_quay_poc_conf.adoc +++ b/modules/proc_deploy_quay_poc_conf.adoc @@ -1,39 +1,62 @@ +:_content-type: PROCEDURE +[id="poc-configuring-quay"] = Configuring {productname} -Before running the {productname} service, you need to generate a configuration file that details of all the components, including registry settings, and database and Redis connection parameters. -. To generate a configuration file, run the `Quay` container in `config` mode, specifying a password, for example, the string `secret`. +Use the following procedure to generate a configuration file that details all components, including registry settings, the database, and Redis connection parameters. + +.Procedure + +. To generate a configuration file, enter the following command to run the `Quay` container in `config` mode. You must specify a password, for example, the string `secret`: + [subs="verbatim,attributes"] -.... +---- $ sudo podman run --rm -it --name quay_config -p 80:8080 -p 443:8443 {productrepo}/{quayimage}:{productminv} config secret -.... +---- -. 
Use your browser to access the user interface for the configuration tool at `\http://quay-server.example.com`. Note this documentation assumes you have configured the `quay-server.example.com` hostname in your `/etc/hosts` file. +. Use your browser to access the user interface for the configuration tool at `\http://quay-server.example.com`. ++ +[NOTE] +==== +This documentation assumes that you have configured the `quay-server.example.com` hostname in your `/etc/hosts` file. +==== -. Log in with the username `quayconfig` and password `secret`, or whatever values were specified in the `podman run` command above. +. Log in with the username and password you set in Step 1 of xref:poc-configuring-quay[Configuring {productname}]. ++ +[NOTE] +==== +If you followed this procedure, the username is *quayconfig* and the password is *secret*. +==== +[id="poc-quay-setup"] == {productname} setup -In the configuration editor, the following details are entered: +In the {productname} configuration editor, you must enter the following details: * Basic configuration * Server configuration * Database * Redis - +[id="poc-basic-configuration"] === Basic configuration -In the basic configuration setting, complete the registry title and the registry short title fields. The default values can be used if they are populated. +Under *Basic Configuration*, populate the *Registry Title* and *Registry Title Short* fields. The default values can be used if they are populated. === Server configuration -Specify the HTTP host and port for the location where the registry will be accessible on the network. If you followed the instructions in this document, enter `quay-server.example.com`. +Under *Server Hostname*, specify the HTTP host and port for the location where the registry will be accessible on the network. + +If you followed the instructions in this document, enter `quay-server.example.com`. +[id="poc-database"] === Database -In the database section, specify connection details for the database that {productname} uses to store metadata. If you followed the instructions in this document for deploying a proof-of-concept system, the following values would be entered: +In the *Database* section, specify the connection details for the database that {productname} uses to store metadata. + +If you followed the instructions in this document for deploying a proof of concept system, enter the following values: * **Database Type:** Postgres * **Database Server:** quay-server.example.com:5432 @@ -41,16 +64,20 @@ In the database section, specify connection details for the database that {produ * **Password:** quaypass * **Database Name:** quay +[id="poc-redis"] === Redis -The Redis key-value store is used to store real-time events and build logs. If you followed the instructions in this document for deploying a proof-of-concept system, the following values would be entered: +The Redis key-value store is used to store real-time events and build logs. + +If you followed the instructions in this document for deploying a proof-of-concept system, enter the following values under the *Redis* section: * **Redis Hostname:** quay-server.example.com * **Redis port:** 6379 (default) * **Redis password:** strongpassword
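+ +For reference, the values entered in these sections correspond to fields in the generated `config.yaml` file. The following is a partial sketch, assuming the example values used throughout this document; the generated file contains additional settings: + +[source,yaml] +---- +SERVER_HOSTNAME: quay-server.example.com +DB_URI: postgresql://quayuser:quaypass@quay-server.example.com:5432/quay +BUILDLOGS_REDIS: + host: quay-server.example.com + port: 6379 + password: strongpassword +USER_EVENTS_REDIS: + host: quay-server.example.com + port: 6379 + password: strongpassword +----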
+[id="poc-validating"] == Validate and download configuration -When all required fields have been set, validate your settings by clicking `Validate Configuration Changes`. If any errors are reported, continue editing your configuration until all required fields are valid and {productname} can connect to your database and Redis servers. +After all required fields have been set, validate your settings by clicking *Validate Configuration Changes*. If any errors are reported, continue editing your configuration until the settings are valid and {productname} can connect to your database and Redis servers. -Once your configuration is valid, download the `configuration` file. Stop the `Quay` container that is running the configuration editor. +After validation, download the configuration file. Stop the `Quay` container that is running the configuration editor. \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_db.adoc b/modules/proc_deploy_quay_poc_db.adoc index 47dd42b8c..2da302457 100644 --- a/modules/proc_deploy_quay_poc_db.adoc +++ b/modules/proc_deploy_quay_poc_db.adoc @@ -1,21 +1,35 @@ +:_content-type: PROCEDURE +[id="poc-configuring-database"] + = Configuring the database -Quay requires a database for storing metadata. Postgres is used throughout this document and is recommended for highly available configurations. Alternatively, you can use MySQL with a similar approach to configuration as described below. +{productname} requires a database for storing metadata. Postgres is used throughout this document and is recommended for highly available configurations. Alternatively, you can use MySQL with a similar approach to configuration as described below. +[id="poc-setting-up-postgres"] == Setting up Postgres -In this proof-of-concept scenario, you will use a directory on the local file system to persist database data. +For the {productname} proof of concept, a directory on the local file system is used to persist database data. + +.Procedure -. In the installation folder, denoted here by the variable $QUAY, create a directory for the database data and set the permissions appropriately: +. In the installation folder, denoted here by the `$QUAY` variable, create a directory for the database data by entering the following command: + -.... +[source,terminal] +---- $ mkdir -p $QUAY/postgres-quay +---- + +. Set the appropriate permissions by entering the following command: ++ +[source,terminal] +---- $ setfacl -m u:26:-wx $QUAY/postgres-quay -.... -. Use `podman run` to start the `Postgres` container, specifying the username, password, database name and port, together with the volume definition for database data: +---- + +. Start the `Postgres` container, specifying the username, password, database name, and port, together with the volume definition for database data: + [subs="verbatim,attributes"] -.... +---- $ sudo podman run -d --rm --name postgresql-quay \ -e POSTGRESQL_USER=quayuser \ -e POSTGRESQL_PASSWORD=quaypass \ @@ -23,21 +37,18 @@ $ sudo podman run -d --rm --name postgresql-quay \ -e POSTGRESQL_ADMIN_PASSWORD=adminpass \ -p 5432:5432 \ -v $QUAY/postgres-quay:/var/lib/pgsql/data:Z \ - registry.redhat.io/rhel8/postgresql-10:1 -.... -. Ensure that the Postgres `pg_trgm` module is installed, as it is required by Quay: + {postgresimage} +---- + +. Ensure that the Postgres `pg_trgm` module is installed by running the following command: + -.... +[source,terminal] +---- $ sudo podman exec -it postgresql-quay /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm" | psql -d quay -U postgres' -.... - -//// -== Determine the IP address of the database server - -Use the `podman inspect` command to determine the IP address for the database. 
You will need this information when using the configuration editor later. - -.... -$ sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay -10.88.0.18 -.... -//// +---- ++ +[NOTE] +==== +The `pg_trgm` module is required for the `Quay` container. +==== \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_dns.adoc b/modules/proc_deploy_quay_poc_dns.adoc index 18cc7a2f4..70211116e 100644 --- a/modules/proc_deploy_quay_poc_dns.adoc +++ b/modules/proc_deploy_quay_poc_dns.adoc @@ -36,7 +36,7 @@ Edit the file `/etc/cni/net.d/87-podman-bridge.conflist` and add a stanza for th   "cniVersion": "0.4.0",   "name": "podman",   "plugins": [ - ... + ... {       "type": "tuning"     }, @@ -65,16 +65,16 @@ The `web` container should respond to the `client` request with the message `pod == Using names in the configuration tool -The same basic commands are used to deploy the database and Redis for {productname}, but in the configuration tool you can now use names rather than IP addresses. +The same basic commands are used to deploy the database and Redis for {productname}, but in the configuration tool you can now use names rather than IP addresses. * Stop the `Quay` container if it is running and start it up in config mode, specifying the config volume if it already exists: + [subs="verbatim,attributes"] -.... +---- $ sudo podman run --rm -it --name quay_config -p 8080:8080 \ -v $QUAY/config:/conf/stack:Z \ {productrepo}/{quayimage}:{productminv} config secret -.... +---- * Update the database and Redis configuration to use the container name rather than the IP address: ** **Database Type:** Postgres @@ -94,13 +94,13 @@ $ sudo podman run --rm -it --name quay_config -p 8080:8080 \ Restart the `Quay` container, specifying the appropriate volumes for your configuration data and local storage for image data: [subs="verbatim,attributes"] -.... +---- $ sudo podman run --rm -p 8080:8080 \ --name=quay \ -v $QUAY/config:/conf/stack:Z \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} -.... +---- Confirm that the redeployment has been successful after the switch to using the naming service. Log in to quay with the user you created earlier, either using the UI at `quay-server:8080` or via the command line using `sudo podman login --tls-verify=false quay-server:8080`. diff --git a/modules/proc_deploy_quay_poc_redis.adoc b/modules/proc_deploy_quay_poc_redis.adoc index 693483cd6..88816138d 100644 --- a/modules/proc_deploy_quay_poc_redis.adoc +++ b/modules/proc_deploy_quay_poc_redis.adoc @@ -1,26 +1,22 @@ +:_content-type: PROCEDURE +[id="poc-configuring-redis"] = Configuring Redis -Redis ia a key-value store that is used by Quay for live builder logs and the {productname} tutorial. - +Redis is a key-value store that is used by {productname} for live builder logs and the {productname} tutorial. +[id="poc-setting-up-redis"] == Setting up Redis -* Use `podman run` to start the `Redis` container, specifying the port and password: +Use the following procedure to deploy the `Redis` container for the {productname} proof of concept. + +.Procedure + +* Start the `Redis` container, specifying the port and password, by entering the following command: + -.... +[subs="verbatim,attributes"] +---- $ sudo podman run -d --rm --name redis \ -p 6379:6379 \ -e REDIS_PASSWORD=strongpassword \ - registry.redhat.io/rhel8/redis-5:1 -.... - -//// -== Determine the IP address of the Redis server - -Use the `podman inspect` command to determine the IP address for Redis. 
You will need this information when using the configuration editor later. - -.... -$ sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" redis -10.88.0.18 -.... -//// + {redisimage} +---- \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_restart.adoc b/modules/proc_deploy_quay_poc_restart.adoc index 65dfe6e52..57f97cb4f 100644 --- a/modules/proc_deploy_quay_poc_restart.adoc +++ b/modules/proc_deploy_quay_poc_restart.adoc @@ -1,6 +1,6 @@ = Restarting containers -Because the `--restart` option is not fully supported by podman, you can configure `podman` as a systemd service, as described +Because the `--restart` option is not fully supported by podman, you can configure `podman` as a systemd service, as described in link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#porting-containers-to-systemd-using-podman_building-running-and-managing-containers[Porting containers to systemd using Podman] @@ -88,9 +88,9 @@ Once you have the services configured and enabled, reboot the system. When the .... $ sudo podman ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4e87c7889246 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 19 seconds ago Up 18 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay -b8fbac1920d4 registry.redhat.io/rhel8/redis-5:1 run-redis 19 seconds ago Up 18 seconds ago 0.0.0.0:6379->6379/tcp redis -d959d5bf7a24 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 18 seconds ago Up 18 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 19 seconds ago Up 18 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110 run-redis 19 seconds ago Up 18 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 18 seconds ago Up 18 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 18 seconds ago Up 17 seconds ago 0.0.0.0:8081->8080/tcp clairv4 .... @@ -136,9 +136,9 @@ Once you have updated the Quay service configuration, reboot the server and imme .... $ sudo podman ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4e87c7889246 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 29 seconds ago Up 28 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay -b8fbac1920d4 registry.redhat.io/rhel8/redis-5:1 run-redis 29 seconds ago Up 28 seconds ago 0.0.0.0:6379->6379/tcp redis -d959d5bf7a24 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 28 seconds ago Up 28 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 29 seconds ago Up 28 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110 run-redis 29 seconds ago Up 28 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 28 seconds ago Up 28 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 28 seconds ago Up 27 seconds ago 0.0.0.0:8081->8080/tcp clairv4 .... @@ -147,10 +147,10 @@ Initially, the `Quay` container will not be available, but once the `RestartSec` .... 
$ sudo podman ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4e87c7889246 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 35 seconds ago Up 34 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 35 seconds ago Up 34 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay ab9f0e6ad7c3 registry.redhat.io/quay/quay-rhel8:v3.4.0 registry 3 seconds ago Up 2 seconds ago 0.0.0.0:8080->8080/tcp quay -b8fbac1920d4 registry.redhat.io/rhel8/redis-5:1 run-redis 35 seconds ago Up 34 seconds ago 0.0.0.0:6379->6379/tcp redis -d959d5bf7a24 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 34 seconds ago Up 34 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110 run-redis 35 seconds ago Up 34 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 34 seconds ago Up 34 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 34 seconds ago Up 33 seconds ago 0.0.0.0:8081->8080/tcp clairv4 .... diff --git a/modules/proc_deploy_quay_poc_rhel.adoc b/modules/proc_deploy_quay_poc_rhel.adoc index caaa370a6..1c0e5d608 100644 --- a/modules/proc_deploy_quay_poc_rhel.adoc +++ b/modules/proc_deploy_quay_poc_rhel.adoc @@ -1,86 +1,128 @@ -= Configuring the Red Hat Enterprise Linux server +:_content-type: PROCEDURE +[id="poc-configuring-rhel-server"] += Preparing Red Hat Enterprise Linux for a {productname} proof of concept deployment +Use the following procedures to configure {rhel} for a {productname} proof of concept deployment. +[id="poc-install-register-rhel-server"] == Install and register the RHEL server -. Install the latest RHEL 8 server. You can do a minimal, shell-access only install, or Server plus GUI if you want a desktop. +Use the following procedure to configure the {rhel} server for a {productname} proof of concept deployment. -. Register and subscribe your RHEL server system as described in link:https://access.redhat.com/solutions/253273[How to register and subscribe a system...]. -. Use the following commands to register your system and list available subscriptions. Choose an available RHEL server subscription, attach to its pool ID and upgrade to the latest software: +.Procedure + +. Install the latest {rhel-short} 8 server. You can do a minimal, shell-access only install, or Server plus GUI if you want a desktop. + +. Register and subscribe your {rhel-short} server system as described in link:https://access.redhat.com/solutions/253273[How to register and subscribe a RHEL system to the Red Hat Customer Portal using Red Hat Subscription-Manager]. + +. Enter the following commands to register your system and list available subscriptions. Choose an available {rhel-short} server subscription, attach to its pool ID, and upgrade to the latest software: + -.... +[source,terminal] +---- # subscription-manager register --username=<username> --password=<password> # subscription-manager refresh # subscription-manager list --available # subscription-manager attach --pool=<pool_id> # yum update -y -.... +---- +[id="poc-installing-podman"] == Installing Podman -* Install Podman if it is not already on your system: +Use the following procedure to install Podman. + +.Procedure + +* Enter the following command to install Podman: + -.... +[source,terminal] +---- $ sudo yum install -y podman -.... 
+----
-* Alternatively, you can install the `container-tools` module, which pulls in the full set of container software packages:
+* Alternatively, you can install the `container-tools` module, which pulls in the full set of container software packages:
+
-....
+[source,terminal]
+----
 $ sudo yum module install -y container-tools
-....
+----
+[id="poc-registry-authentication"]
 == Registry authentication
-* Set up authentication to `registry.redhat.io`, so that you can pull the `Quay` container, as described in link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]. Note that this differs from earlier {productname} releases where the images were hosted on `quay.io`.
+Use the following procedure to authenticate your registry for a {productname} proof of concept.
+
+.Procedure
+
+. Set up authentication to `registry.redhat.io` by following the link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication] procedure. Setting up authentication allows you to pull the `Quay` container.
+
-You can log in to the registry using the following command:
+[NOTE]
+====
+This differs from earlier versions of {productname}, when the images were hosted on Quay.io.
+====
+
+. Enter the following command to log in to the registry:
 +
-....
+[source,terminal]
+----
 $ sudo podman login registry.redhat.io
-Username:
-Password:
-....
+----
++
+You are prompted to enter your `username` and `password`.
+[id="poc-firewall-configuration"]
 == Firewall configuration
-* If you have a firewall running on your system, you might have to add rules that allow access to {productname}. The commands required depend on the ports you have mapped, for example:
+If you have a firewall running on your system, you might have to add rules that allow access to {productname}. Use the following procedure to configure your firewall for a proof of concept deployment.
+
+.Procedure
+
+* The commands required depend on the ports that you have mapped on your system, for example:
 +
-....
+[source,terminal]
+----
 $ firewall-cmd --permanent --add-port=80/tcp
 $ firewall-cmd --permanent --add-port=443/tcp
 $ firewall-cmd --permanent --add-port=5432/tcp
 $ firewall-cmd --permanent --add-port=5433/tcp
 $ firewall-cmd --permanent --add-port=6379/tcp
 $ firewall-cmd --reload
-....
+----
+
-[[ip-naming]]
+[id="poc-ip-naming"]
 == IP addressing and naming services
-There are a number of ways to configure the component containers in {productname} so that they can talk to each other:
+There are several ways to configure the component containers in {productname} so that they can communicate with each other, for example:
-* **Using the IP addresses for the containers:** You can determine the IP address for containers with `podman inspect` and then use these values in the configuration tool when specifying the connection strings, for example:
+* **Using the IP addresses for the containers**. You can determine the IP address for containers with `podman inspect` and then use the values in the configuration tool when specifying the connection strings, for example:
+
-....
+[source,terminal]
+----
 $ sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay
-....
+----
+
 This approach is susceptible to host restarts, as the IP addresses for the containers will change after a reboot.
-* **Using a naming service:** If you want your deployment to survive container restarts, which typically result in changed IP addresses, you can implement a naming service. For example, the link:https://github.com/containers/dnsname[dnsname] plugin is used to allow containers to resolve each other by name.
-* **Using the host network:** You can use the `podman run` command with the `--net=host` option and then use container ports on the host when specifying the addresses in the configuration. This option is susceptible to port conflicts when two containers want to use the same port, and as a result it is not recommended.
-* **Configuring port mapping:** You can use port mappings to expose ports on the host and then use these ports in combination with the host IP address or host name.
-This document uses port mapping and assumes a static IP address for your host system. Throughout this deployment, we use `quay-server.example.com` with our system's IP address, `192.168.1.112`, and establish this information in the `/etc/hosts` file:
+* **Using a naming service**. If you want your deployment to survive container restarts, which typically result in changed IP addresses, you can implement a naming service. For example, the link:https://github.com/containers/dnsname[dnsname] plugin is used to allow containers to resolve each other by name (see the sketch after this list).
-....
+* **Using the host network**. You can use the `podman run` command with the `--net=host` option and then use container ports on the host when specifying the addresses in the configuration. This option is susceptible to port conflicts when two containers want to use the same port. This method is not recommended.
+
+* **Configuring port mapping**. You can use port mappings to expose ports on the host and then use these ports in combination with the host IP address or host name.
+
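+As a sketch of the naming-service approach (the network name `quay-net` is illustrative; container name resolution requires the `dnsname` plugin on Podman 3.x, or the built-in aardvark-dns on Podman 4.x):
+
+[source,terminal]
+----
+$ sudo podman network create quay-net
+$ sudo podman run -d --name redis --net quay-net -e REDIS_PASSWORD=strongpassword registry.redhat.io/rhel8/redis-6:1-110
+# other containers attached to quay-net can now reach this server at the host name "redis"
+----
+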
+This document uses port mapping and assumes a static IP address for your host system. Throughout the deployment, `quay-server.example.com` is used with the `192.168.1.112` IP address. This information is established in the `/etc/hosts` file, for example:
+[source,terminal]
+----
 $ cat /etc/hosts
-...
+----
+Example output:
+[source,terminal]
+----
 192.168.1.112   quay-server.example.com
-....
-
+----
+.Sample proof of concept port mapping
 [%header, cols="2,1,1"]
 |===
 |Component
diff --git a/modules/proc_deploy_quay_poc_run.adoc b/modules/proc_deploy_quay_poc_run.adoc
index 21d5e1ce9..312e57795 100644
--- a/modules/proc_deploy_quay_poc_run.adoc
+++ b/modules/proc_deploy_quay_poc_run.adoc
@@ -1,42 +1,82 @@
+:_content-type: PROCEDURE
+[id="poc-deploying-quay"]
 = Deploying {productname}
-
 == Prerequisites
-* Your Quay database and Redis servers are running.
-* You have generated a valid configuration bundle.
-* You have stopped the `Quay` container that you used to run the configuration editor.
+* The {productname} database is running.
+* The Redis server is running.
+* You have generated a valid configuration file.
+* You have stopped the `Quay` container that was running the configuration editor.
+
+[id="preparing-configuration-folder"]
+== Preparing the configuration folder
+Use the following procedure to prepare your {productname} configuration folder.
-== Prepare config folder
+.Procedure
-* Unpack the configuration bundle so that Quay can use it:
+. Create a directory to copy the {productname} configuration bundle to:
 +
-....
+[source,terminal]
+----
 $ mkdir $QUAY/config
-$ cp ~/Downloads/quay-config.tar.gz $QUAY/config
+----
+
+. Copy the generated {productname} configuration bundle to the directory:
++
+[source,terminal]
+----
+$ cp ~/Downloads/quay-config.tar.gz $QUAY/config
+----
+
+. Change into the directory:
++
+[source,terminal]
+----
 $ cd $QUAY/config
+----
+
+. Unpack the {productname} configuration bundle:
++
+[source,terminal]
+----
 $ tar xvf quay-config.tar.gz
-....
+----
+[id="preparing-local-storage"]
 == Prepare local storage for image data
-* For this proof-of-concept deployment, use the local file system to store the registry images:
+Use the following procedure to set your local file system to store registry images.
+
+.Procedure
+
+. Create a local directory that will store registry images by entering the following command:
 +
-....
+[source,terminal]
+----
 $ mkdir $QUAY/storage
+----
+
+. Set the permissions on the directory so that the container user (UID 1001) can write registry images to it:
++
+[source,terminal]
+----
 $ setfacl -m u:1001:-wx $QUAY/storage
-....
+----
+[id="deploy-quay-registry"]
 == Deploy the {productname} registry
-* Use `podman run` to start the `Quay` container. Specify the appropriate volumes for your configuration data and local storage for image data:
+Use the following procedure to deploy the `Quay` registry container.
+
+.Procedure
+
+. Enter the following command to start the `Quay` registry container, specifying the appropriate volumes for configuration data and local storage for image data:
 +
 [subs="verbatim,attributes"]
-....
+----
 $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \
    --name=quay \
    -v $QUAY/config:/conf/stack:Z \
    -v $QUAY/storage:/datastorage:Z \
    {productrepo}/{quayimage}:{productminv}
-....
+----
diff --git a/modules/proc_deploy_quay_single.adoc b/modules/proc_deploy_quay_single.adoc
index 5fe2a4906..00c92401f 100644
--- a/modules/proc_deploy_quay_single.adoc
+++ b/modules/proc_deploy_quay_single.adoc
@@ -9,7 +9,7 @@ Follow these steps to install {productname} on a single system (VM or bare metal
 This procedure was tested on RHEL 7. The `docker` command is not included in RHEL 8, so you would need to use the `podman` command instead. Because the `--restart` option is not supported by podman, instead of using `--restart`,
-you could set up to use `podman` as a systemd service, as described
+you could set up to use `podman` as a systemd service, as described in
 link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#starting_containers_with_systemd[Starting containers with systemd].
 ====
@@ -43,7 +43,7 @@ active
 ....
 . **Open ports in firewall**: If you have a firewall running on your system,
-to access the Red Hat Quay config tool (port 8443) and application (ports 80 and 443)
+to access the {productname} config tool (port 8443) and application (ports 80 and 443)
 outside of the local system, run the following commands (add `--zone=` for each command to open ports on a particular zone):
 +
 ....
diff --git a/modules/proc_installing-qbo-on-ocp.adoc b/modules/proc_installing-qbo-on-ocp.adoc
new file mode 100644
index 000000000..d8075bd5f
--- /dev/null
+++ b/modules/proc_installing-qbo-on-ocp.adoc
@@ -0,0 +1,20 @@
+:_content-type: PROCEDURE
+[id="installing-qbo-on-ocp"]
+= Installing the {qbo} on {ocp}
+
+In this procedure, you will install the {qbo} on {ocp}.
+
+.Prerequisites
+
+* You have set up {productname} and obtained an Access Token.
+* An {ocp} 4.6 or greater environment for which you have cluster administrator permissions.
+
+.Procedure
+
+. Open the *Administrator* perspective of the web console and navigate to *Operators* → *OperatorHub* on the navigation pane.
+
+. Search for `{qbo}`, click the *{qbo}* title, and then click *Install*.
+
+. Select the version to install, for example, *stable-3.7*, and then click *Install*.
+
+. Click *View Operator* when the installation finishes to go to the {qbo}'s *Details* page. Alternatively, you can click *Installed Operators* → *Red Hat Quay Bridge Operator* to go to the *Details* page.
diff --git a/modules/proc_manage-advanced-config.adoc b/modules/proc_manage-advanced-config.adoc
index b777d398d..b72494a67 100644
--- a/modules/proc_manage-advanced-config.adoc
+++ b/modules/proc_manage-advanced-config.adoc
@@ -1,249 +1,284 @@
-[[advanced-quay-configuration]]
+:_content-type: CONCEPT
+
+[id="advanced-quay-configuration"]
 = Advanced {productname} configuration
-You can configure your {productname} after initial deployment using
-several different interfaces:
+You can configure your {productname} after initial deployment using one of the following interfaces:
-* The {productname} Config Tool: Running the `Quay` container in `config` mode
-presents a Web-based interface for configuring the {productname} cluster. This
-is the recommended method for most configuration of the {productname} service itself.
+* The {productname} Config Tool. With this tool, a web-based interface for configuring the {productname} cluster is provided when running the `Quay` container in `config` mode. This method is recommended for configuring the {productname} service.
-* Editing the `config.yaml`: The `config.yaml` file holds most of the configuration
-information for the {productname} cluster. Editing that file directly is possible,
-but it is only recommended for advanced tuning and performance features that are
-not available through the Config Tool.
+* Editing the `config.yaml`. The `config.yaml` file contains most configuration information for the {productname} cluster. Editing the `config.yaml` file directly is possible, but it is only recommended for advanced tuning and performance features that are not available through the Config Tool.
-* {productname} API: Some {productname} configuration can be done through the API.
+* {productname} API. Some {productname} features can be configured through the API.
-While configuration for specific features is covered in separate sections, this
-section describes how to use each of those interfaces and perform some
-more advanced configuration.
+The content in this section describes how to use each of the aforementioned interfaces and how to configure your deployment with advanced features.
 [[using-the-config-tool]]
 == Using {productname} Config Tool to modify {productname}
-The {productname} Config Tool is made available by running a `Quay` container
-in `config` mode alongside the regular {productname} service. Running the
-Config Tool is different for {productname} clusters running on OpenShift than
-it is for those running directly on host systems.
+The {productname} Config Tool is made available by running a `Quay` container in `config` mode alongside the regular {productname} service.
+
+Use the following sections to run the Config Tool from the {productname} Operator, or to run the Config Tool on host systems from the command line interface (CLI).
+
+[id="running-config-tool-from-quay-operator"]
 === Running the Config Tool from the {productname} Operator
-If you are running the {productname} Operator from OpenShift, the Config Tool
-is probably already available for you to use. To access the Config Tool,
-do the following:
-. From the OpenShift console, select the project in which {productname}
-is running. For example, quay-enterprise.
+When running the {productname} Operator on {ocp}, the Config Tool is readily available to use. Use the following procedure to access the {productname} Config Tool.
+
+.Prerequisites
+
+* You have deployed the {productname} Operator on {ocp}.
+
+.Procedure
+
+. On the OpenShift console, select the {productname} project, for example, `quay-enterprise`.
-. From the left column, select Networking -> Routes. You should see routes to
-both the {productname} application and Config Tool, as shown in
-the following image:
+. In the navigation pane, select *Networking* -> *Routes*. You should see routes to both the {productname} application and Config Tool, as shown in the following image:
+
 image:configtoolroute.png[View the route to the {productname} Config Tool]
-. Select the route to the Config Tool (for example, example-quayecosystem-quay-config) and select it.
-The Config tool Web UI should open in your browser.
+. Select the route to the Config Tool, for example, `example-quayecosystem-quay-config`. The Config Tool UI should open in your browser.
-. Select `Modify configuration for this cluster`. You should see the
-Config Tool, ready for you to change features
-of your {productname} cluster, as shown in the following image:
+. Select *Modify configuration for this cluster* to bring up the Config Tool setup, for example:
+
 image:configtoolsetup.png[Modify {productname} cluster settings from the Config Tool]
-. When you have made the changes you want, select `Save Configuration Changes`.
-The Config Tool will validate your changes.
+. Make the desired changes, and then select *Save Configuration Changes*.
-. Make any corrections as needed by selecting `Continue Editing`
-or select `Next` to continue on.
+. Make any corrections needed by clicking *Continue Editing*, or select *Next* to continue.
-. When prompted, it is recommended that you select `Download Configuration`.
-That will download a tarball of your new `config.yaml`, as well as any
-certificates and keys used with your {productname} setup.
+. When prompted, select *Download Configuration*. This downloads a tarball of your new `config.yaml` file, as well as any certificates and keys used with your {productname} setup. The `config.yaml` file can be used to make advanced changes to your configuration, or kept as a future reference.
-. Select `Go to deployment rollout`, then
-`Populate the configuration to deployments`. The {productname}
-pods will be restarted and the changes will take effect.
-
-The `config.yaml` file you saved can be used to make advanced
-changes to your configuration or just kept for future reference.
+. Select *Go to deployment rollout* -> *Populate the configuration to deployments*. Wait for the {productname} pods to restart for the changes to take effect.
+[id="running-config-tool-from-cli"]
 === Running the Config Tool from the command line
-If you are running {productname} directly from a host system,
-using tools such as the `podman` or `docker` commands,
-after the initial {productname} deployment, you can restart the
-Config Tool to modify your {productname} cluster. Here's how:
-
-. **Start quay in config mode**: On the first `quay` node run the following, replacing
-`my-secret-password` with your password. If you would like to modify an existing config bundle,
-you can simply mount your configuration directory into the `Quay` container as you would in registry mode.
+
+If you are running {productname} from a host system, you can use the following procedure to make changes to your configuration after the initial deployment.
+
+.Prerequisites
+
+* You have installed either `podman` or `docker`.
+
+.Procedure
+
+. Start {productname} in configuration mode. On the first `Quay` node, enter the following command:
 +
 [subs="verbatim,attributes"]
-....
-# podman run --rm -it --name quay_config -p 8080:8080 \
+----
+$ podman run --rm -it --name quay_config -p 8080:8080 \
     -v path/to/config-bundle:/conf/stack \
-    {productrepo}/{quayimage}:{productminv} config my-secret-password
-....
+    {productrepo}/{quayimage}:{productminv} config
+----
++
+[NOTE]
+====
+To modify an existing config bundle, you can mount your configuration directory into the `Quay` container.
+====
+
+. When the {productname} configuration tool starts, open your browser and navigate to the URL and port used in your configuration file, for example, `quay-server.example.com:8080`.
+
+. Enter your username and password.
-. **Open browser**: When the quay configuration tool starts up, open a browser to the URL and port 8080
-of the system you are running the configuration tool on
-(for example https://myquay.example.com:8080). You are prompted for a username and password.
+. Modify your {productname} cluster as desired.
-At this point, you can begin modifying your {productname} cluster as described earlier.
+[id="deploying-config-tool-using-tls"]
+=== Deploying the config tool using TLS certificates
-[[overview-advanced-config]]
+You can deploy the config tool with secured TLS certificates by passing environment variables to the container at runtime. This ensures that sensitive data, such as credentials for the database and storage backend, are protected.
+
+The public and private keys must contain valid Subject Alternative Names (SANs) for the route that you deploy the config tool on.
+
+The paths can be specified using `CONFIG_TOOL_PRIVATE_KEY` and `CONFIG_TOOL_PUBLIC_KEY`.
+
+If you are running your deployment from a container, the `CONFIG_TOOL_PRIVATE_KEY` and `CONFIG_TOOL_PUBLIC_KEY` values are the locations of the certificates inside of the container. For example:
+
+[source,terminal]
+----
+$ podman run --rm -it --name quay_config -p 7070:8080 \
+-v ${PRIVATE_KEY_PATH}:/tls/localhost.key \
+-v ${PUBLIC_KEY_PATH}:/tls/localhost.crt \
+-e CONFIG_TOOL_PRIVATE_KEY=/tls/localhost.key \
+-e CONFIG_TOOL_PUBLIC_KEY=/tls/localhost.crt \
+-e DEBUGLOG=true \
+-ti config-app:dev
+----
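+
+If you need a key pair with the required SANs for testing, one way to generate a self-signed pair is with OpenSSL (version 1.1.1 or later for the `-addext` flag; the host name is illustrative):
+
+[source,terminal]
+----
+$ openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
+  -keyout localhost.key -out localhost.crt \
+  -subj "/CN=quay-server.example.com" \
+  -addext "subjectAltName=DNS:quay-server.example.com"
+----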
+
+[id="overview-advanced-config"]
 == Using the API to modify {productname}
+
 See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_api_guide/index[{productname} API Guide] for information on how to access the {productname} API.
+[id="editing-config-file-to-modify-quay"]
 == Editing the `config.yaml` file to modify {productname}
-Some advanced {productname} configuration that is not available through
-the Config Tool can be achieved by editing the `config.yaml` file directly.
-Available settings are described in the
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/quay-schema[Schema for Red Hat Quay configuration]
-The following are examples of settings you can change directly in the `config.yaml` file.
+Some advanced configuration features that are not available through the Config Tool can be implemented by editing the `config.yaml` file directly. Available settings are described in the
+link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/quay-schema[Schema for {productname} configuration].
+
+The following examples are settings that you can change directly in the `config.yaml` file.
+
+[id="add-name-and-company-to-quay-sign-in"]
 === Add name and company to {productname} sign-in
-Setting the following will cause users to be prompted for their name and
-company when they first sign in. Although this is optional, it can provide
-you with extra data about your {productname} users:
-+
-`FEATURE_USER_METADATA: true`
+By setting the following field, users are prompted for their name and company when they first sign in. This is an optional field, but it can provide you with extra data about your {productname} users.
+[source,yaml]
+----
+---
+FEATURE_USER_METADATA: true
+---
+----
+
+[id="disable-tls-protocols"]
 === Disable TLS Protocols
-You can change the SSL_PROTOCOLS setting to remove SSL protocols that you
-do not want to support in your {productname} instance. For example, to remove
-TLS v1 support from the default
-SSL_PROTOCOLS : ['TLSv1','TLSv1.1','TLSv1.2'], change it as follows:
-+
+You can change the `SSL_PROTOCOLS` setting to remove SSL protocols that you do not want to support in your {productname} instance. For example, to remove TLS v1 support from the default `SSL_PROTOCOLS: ['TLSv1','TLSv1.1','TLSv1.2']`, change it to the following:
+
+[source,yaml]
+----
+---
 SSL_PROTOCOLS : ['TLSv1.1','TLSv1.2']
+---
+----
+[id="rate-limit-api-calls"]
 === Rate limit API calls
-Adding the FEATURE_RATE_LIMITS parameter to the `config.yaml` causes `nginx` to
-limit certain API calls to 30 per second. If that feature is not set, API calls
-are limied to 300 per second (effectively unlimited).
-Rate limiting can be an important feature, if you need to make sure the resources
-available are not overwhelmed with traffic.
+Adding the `FEATURE_RATE_LIMITS` parameter to the `config.yaml` file causes `nginx` to limit certain API calls to 30 per second. If `FEATURE_RATE_LIMITS` is not set, API calls are limited to 300 per second, making them effectively unlimited.
+
+Rate limiting is important when you must ensure that the available resources are not overwhelmed with traffic.
-Some namespace may require unlimited access (perhaps they are important to CI/CD
-and take priority, for example). In this case, those namespace may be placed in
-a list in `config.yaml` for NON_RATE_LIMITED_NAMESPACES.
+Some namespaces might require unlimited access, for example, if they are important to CI/CD and take priority. In that scenario, those namespaces might be placed in a list in the `config.yaml` file using the `NON_RATE_LIMITED_NAMESPACES` field, as shown in the sketch that follows.
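+
+A minimal sketch of that list, following the same `config.yaml` conventions as the other examples in this section (the namespace name is illustrative):
+
+[source,yaml]
+----
+---
+FEATURE_RATE_LIMITS: true
+NON_RATE_LIMITED_NAMESPACES:
+  - ci-cd-namespace
+---
+----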
+[id="adjust-database-connection-pool"]
 === Adjust database connection pooling
 {productname} is composed of many different processes which all run within
 the same container. Many of these processes interact with the database.
-If enabled, each process that interacts with the database will contain a
-connection pool. These per-process connection pools are configured to maintain
-a maximum of 20 connections. Under heavy load, it is possible to fill the
-connection pool for every process within a {productname} container. Under certain
-deployments and loads, this may require analysis to ensure {productname} does not
-exceed the database's configured maximum connection count.
+When `DB_CONNECTION_POOLING` is enabled, each process that interacts with the database contains a connection pool. These per-process connection pools are configured to maintain a maximum of 20 connections. When under heavy load, it is possible to fill the connection pool for every process within a {productname} container. Under certain deployments and loads, this might require analysis to ensure that {productname} does not exceed the database's configured maximum connection count.
-Overtime, the connection pools will release idle connections. To release all
-connections immediately, {productname} requires a restart.
+Over time, the connection pools will release idle connections. To release all connections immediately, {productname} must be restarted.
-Database connection pooling may be toggled by setting the environment
-variable `DB_CONNECTION_POOLING={true|false}`
+Database connection pooling can be toggled by setting `DB_CONNECTION_POOLING` to `true` or `false`. For example:
-If database connection pooling is enabled, it is possible to change the
-maximum size of the connection pool. This can be done through the following
-`config.yaml` option:
+[source,yaml]
+----
+---
+DB_CONNECTION_POOLING: true
+---
+----
-....
+When `DB_CONNECTION_POOLING` is enabled, you can change the maximum size of the connection pool with the `DB_CONNECTION_ARGS` field in your `config.yaml` file. For example:
+
+[source,yaml]
+----
+---
 DB_CONNECTION_ARGS:
   max_connections: 10
-....
+---
+----
+[id="database-connection-arguments"]
 ==== Database connection arguments
-You can customize {productname} database connection settings within the
-`config.yaml` file. These are entirely dependent upon the underlying
-database driver, such as `psycopg2` for Postgres and `pymysql` for MySQL.
-It is also possible to pass in arguments used by Peewee's Connection Pooling
-mechanism as seen below.
+You can customize your {productname} database connection settings within the `config.yaml` file. These are dependent on your deployment's database driver, for example, `psycopg2` for Postgres and `pymysql` for MySQL. You can also pass in arguments used by Peewee's connection pooling mechanism. For example:
-....
+[source,yaml]
+----
+---
 DB_CONNECTION_ARGS:
   max_connections: n  # Max Connection Pool size. (Connection Pooling only)
   timeout: n  # Number of seconds to block when the pool is full. (Connection Pooling only)
   stale_timeout: n  # Number of seconds a connection can be held before it is considered stale and recycled. (Connection Pooling only)
-....
+---
+----
+
+[id="database-ssl-configuration"]
 ==== Database SSL configuration
-Some key-value pairs defined under DB_CONNECTION_ARGS are generic while others are database-specific. In particular, SSL configuration depends on the database you are deploying.
+Some key-value pairs defined under the `DB_CONNECTION_ARGS` field are generic, while others are specific to the database. In particular, SSL configuration depends on the database that you are deploying.
+[id="postgres-ssl-connection-arguments"]
 ===== PostgreSQL SSL connection arguments
-A sample PostgreSQL SSL configuration is given below:
+The following YAML shows a sample PostgreSQL SSL configuration:
+[source,yaml]
 ----
+---
 DB_CONNECTION_ARGS:
   sslmode: verify-ca
   sslrootcert: /path/to/cacert
+---
 ----
-The `sslmode` option determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. There are six modes:
+The `sslmode` parameter determines whether, or with what priority, a secure SSL TCP/IP connection is negotiated with the server. There are six modes for the `sslmode` parameter:
-* **disable:** only try a non-SSL connection
-* **allow:** first try a non-SSL connection; if that fails, try an SSL connection
-* **prefer:** (default) first try an SSL connection; if that fails, try a non-SSL connection
-* **require:** only try an SSL connection. If a root CA file is present, verify the certificate in the same way as if verify-ca was specified
-* **verify-ca:** only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA)
-* **verify-full:** only try an SSL connection, verify that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate
+* **disable**: Only try a non-SSL connection.
+* **allow**: Try a non-SSL connection first. Upon failure, try an SSL connection.
+* **prefer**: Default. Try an SSL connection first. Upon failure, try a non-SSL connection.
+* **require**: Only try an SSL connection. If a root CA file is present, verify the certificate in the same way as if `verify-ca` was specified.
+* **verify-ca**: Only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA).
+* **verify-full**: Only try an SSL connection. Verify that the server certificate is issued by a trusted CA, and that the requested server host name matches that in the certificate.
-More information on the valid arguments for PostgreSQL is available at link:https://www.postgresql.org/docs/current/libpq-connect.html[].
+For more information about the valid arguments for PostgreSQL, see link:https://www.postgresql.org/docs/current/libpq-connect.html[Database Connection Control Functions].
+[id="mysql-ssl-connection-arguments"]
 ===== MySQL SSL connection arguments
-A sample MySQL SSL configuration follows:
+The following YAML shows a sample MySQL SSL configuration:
+[source,yaml]
 ----
+---
 DB_CONNECTION_ARGS:
-  ssl: 
+  ssl:
     ca: /path/to/cacert
+---
 ----
-Information on the valid connection arguments for MySQL is available at link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[].
+For more information about the valid connection arguments for MySQL, see link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[Connecting to the Server Using URI-Like Strings or Key-Value Pairs].
+[id="http-connection-counts"]
 ==== HTTP connection counts
-It is possible to specify the quantity of simultaneous HTTP connections using
-environment variables. These can be specified as a whole, or for a specific
-component. The default for each is 50 parallel connections per process.
+You can specify the quantity of simultaneous HTTP connections using environment variables. The environment variables can be specified as a whole, or for a specific component. The default for each is 50 parallel connections per process. The following example shows the available environment variables:
-Environment variables:
+[source,terminal]
 ----
 WORKER_CONNECTION_COUNT_REGISTRY=n
 WORKER_CONNECTION_COUNT_WEB=n
 WORKER_CONNECTION_COUNT_SECSCAN=n
 WORKER_CONNECTION_COUNT=n
 ----
-[Note]
+[NOTE]
 ====
 Specifying a count for a specific component will override any value
-set in WORKER_CONNECTION_COUNT.
+set in the `WORKER_CONNECTION_COUNT` configuration field.
==== +[id="dynamic-process-counts"] ==== Dynamic process counts To estimate the quantity of dynamically sized processes, the following calculation is used by default. [NOTE] -{productname} queries the available CPU count from the entire machine. Any limits +==== +{productname} queries the available CPU count from the entire machine. Any limits applied using kubernetes or other non-virtualized mechanisms will not affect -this behavior; {productname} will makes its calculation based on the total number of -processors on the Node. The default values listed are simply targets, but shall +this behavior. {productname} makes its calculation based on the total number of processors on the Node. The default values listed are simply targets, but shall not exceed the maximum or be lower than the minimum. +==== Each of the following process quantities can be overridden using the -environment variable specified below. +environment variable specified below: - registry - Provides HTTP endpoints to handle registry action * minimum: 8 @@ -263,21 +298,23 @@ environment variable specified below. * default: $CPU_COUNT x 2 * environment variable: WORKER_COUNT_SECSCAN +[id="environment-variables"] ==== Environment variables {productname} allows overriding default behavior using environment variables. -This table lists and describes each variable and the values they can expect. +The following table lists and describes each variable and the values they can expect. .Worker count environment variables [cols="2a,2a,2a",options="header"] |=== | Variable | Description | Values -| WORKER_COUNT_REGISTRY | Specifies the number of processes to handle Registry requests within the `Quay` container. | Integer between 8 and 64 +| WORKER_COUNT_REGISTRY | Specifies the number of processes to handle registry requests within the `Quay` container. | Integer between 8 and 64 | WORKER_COUNT_WEB | Specifies the number of processes to handle UI/Web requests within the container. | Integer between 2 and 32 -| WORKER_COUNT_SECSCAN | Specifies the number of processes to handle Security Scanning (e.g. Clair) integration within the container. | Integer between 2 and 4 -| DB_CONNECTION_POOLING | Toggle database connection pooling. In 3.4, it is disabled by default. | "true" or "false" +| WORKER_COUNT_SECSCAN | Specifies the number of processes to handle Security Scanning (for example, Clair) integration within the container. | Integer between 2 and 4 +| DB_CONNECTION_POOLING | Toggle database connection pooling. | "true" or "false" |=== +[id="turning-off-connection-pooling"] ==== Turning off connection pooling {productname} deployments with a large amount of user activity can regularly @@ -286,8 +323,7 @@ pooling, which is enabled by default for {productname}, can cause database connection count to rise exponentially and require you to turn off connection pooling. -If turning off connection pooling is not enough to prevent hitting that 2k +If turning off connection pooling is not enough to prevent hitting the 2k database connection limit, you need to take additional steps to deal with -the problem. In this case you might need to increase the maximum database +the problem. If this happens, you might need to increase the maximum database connections to better suit your workload. 
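+
+For illustration, a sketch of turning off pooling and overriding a worker count on a standalone deployment by passing environment variables to the container (the values shown are arbitrary):
+
+[subs="verbatim,attributes"]
+----
+$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \
+   --name=quay \
+   -e WORKER_COUNT_REGISTRY=12 \
+   -e DB_CONNECTION_POOLING=false \
+   -v $QUAY/config:/conf/stack:Z \
+   -v $QUAY/storage:/datastorage:Z \
+   {productrepo}/{quayimage}:{productminv}
+----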
-
diff --git a/modules/proc_manage-ipv6-dual-stack.adoc b/modules/proc_manage-ipv6-dual-stack.adoc
new file mode 100644
index 000000000..889ddd29c
--- /dev/null
+++ b/modules/proc_manage-ipv6-dual-stack.adoc
@@ -0,0 +1,108 @@
+:_content-type: CONCEPT
+[id="proc_manage-ipv6-dual-stack"]
+= IPv6 and dual-stack deployments
+
+Your standalone {productname} deployment can now be served in locations that only support IPv6, such as Telco and Edge environments. Support is also offered for dual-stack networking, so your {productname} deployment can listen on IPv4 and IPv6 simultaneously.
+
+For a list of known limitations, see xref:proc_manage-ipv6-limitations-38[IPv6 and dual-stack limitations].
+
+[id="proc-manage-enabling-ipv6"]
+== Enabling the IPv6 protocol family
+
+Use the following procedure to enable IPv6 support on your standalone {productname} deployment.
+
+.Prerequisites
+
+* You have updated {productname} to 3.8.
+* Your host and container software platform (Docker, Podman) must be configured to support IPv6.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `IPv6`, for example:
++
+[source,yaml]
+----
+---
+FEATURE_GOOGLE_LOGIN: false
+FEATURE_INVITE_ONLY_USER_CREATION: false
+FEATURE_LISTEN_IP_VERSION: IPv6
+FEATURE_MAILING: false
+FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false
+---
+----
+
+. Start, or restart, your {productname} deployment.
+
+. Check that your deployment is listening to IPv6 by entering the following command:
++
+[source,terminal]
+----
+$ curl /health/instance
+{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200}
+----
+
+After enabling IPv6 in your deployment's `config.yaml` file, all {productname} features can be used as normal, so long as your environment is configured to use IPv6 and is not hindered by the xref:proc_manage-ipv6-limitations-38[current limitations].
+
+[WARNING]
+====
+If your environment is configured to IPv4, but the `FEATURE_LISTEN_IP_VERSION` configuration field is set to `IPv6`, {productname} will fail to deploy.
+====
+
+[id="proc-manage-enabling-dual-stack"]
+== Enabling the dual-stack protocol family
+
+Use the following procedure to enable dual-stack (IPv4 and IPv6) support on your standalone {productname} deployment.
+
+.Prerequisites
+
+* You have updated {productname} to 3.8.
+* Your host and container software platform (Docker, Podman) must be configured to support IPv6.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `dual-stack`, for example:
++
+[source,yaml]
+----
+---
+FEATURE_GOOGLE_LOGIN: false
+FEATURE_INVITE_ONLY_USER_CREATION: false
+FEATURE_LISTEN_IP_VERSION: dual-stack
+FEATURE_MAILING: false
+FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false
+---
+----
+
+. Start, or restart, your {productname} deployment.
+
+. Check that your deployment is listening to both channels by entering the following command:
+.. For IPv4, enter the following command:
++
+[source,terminal]
+----
+$ curl --ipv4 
+{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200}
+----
+.. For IPv6, enter the following command:
++
+[source,terminal]
+----
+$ curl --ipv6 
+{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200}
+----
+
+After enabling dual-stack in your deployment's `config.yaml` file, all {productname} features can be used as normal, so long as your environment is configured for dual-stack.
+
+[id="proc_manage-ipv6-limitations-38"]
+== IPv6 and dual-stack limitations
+
+* Currently, attempting to configure your {productname} deployment with the common Azure Blob Storage configuration will not work on IPv6 single-stack environments. Because the endpoint of Azure Blob Storage does not support IPv6, there is no workaround in place for this issue.
++
+For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4433[PROJQUAY-4433].
+
+* Currently, attempting to configure your {productname} deployment with Amazon S3 CloudFront will not work on IPv6 single-stack environments. Because the endpoint of Amazon S3 CloudFront does not support IPv6, there is no workaround in place for this issue.
++
+For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470].
+
+* Currently, OpenShift Data Foundation (ODF) is unsupported when {productname} is deployed on IPv6 single-stack environments. As a result, ODF cannot be used in IPv6 environments. This limitation is scheduled to be fixed in a future version of OpenShift Data Foundation.
\ No newline at end of file
diff --git a/modules/proc_manage-ldap-setup.adoc b/modules/proc_manage-ldap-setup.adoc
index 076f07226..cfc710004 100644
--- a/modules/proc_manage-ldap-setup.adoc
+++ b/modules/proc_manage-ldap-setup.adoc
@@ -91,6 +91,51 @@ image:authentication-ldap-user-filter.png[User filters]
 LDAP_USER_FILTER: (memberof=cn=developers,ou=groups,dc=example,dc=com)
 ....
+[id="ldap-restricted-users-enabling"]
+==== Enabling the LDAP_RESTRICTED_USER_FILTER configuration field
+
+The `LDAP_RESTRICTED_USER_FILTER` configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, it allows {productname} administrators to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider.
+
+Use the following procedure to enable LDAP restricted users on your {productname} deployment.
+
+.Prerequisites
+
+* Your {productname} deployment uses LDAP as its authentication provider.
+* You have configured the `LDAP_USER_FILTER` field in your `config.yaml` file.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `LDAP_RESTRICTED_USER_FILTER` parameter and specify the group of restricted users, for example, `members`:
++
+[source,yaml]
+----
+---
+AUTHENTICATION_TYPE: LDAP
+---
+LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com
+LDAP_ADMIN_PASSWD: ABC123
+LDAP_ALLOW_INSECURE_FALLBACK: false
+LDAP_BASE_DN:
+  - o=
+  - dc=
+  - dc=com
+LDAP_EMAIL_ATTR: mail
+LDAP_UID_ATTR: uid
+LDAP_URI: ldap://.com
+LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com)
+LDAP_RESTRICTED_USER_FILTER: (=)
+LDAP_USER_RDN:
+  - ou=
+  - o=
+  - dc=
+  - dc=com
+----
+
+. Start, or restart, your {productname} deployment.
+
+After enabling the `LDAP_RESTRICTED_USER_FILTER` feature, your LDAP {productname} users are restricted from reading and writing content, and from creating organizations.
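+
+For reference, a populated filter might look like the following sketch (the `restricted-users` group name is illustrative; use a group that exists in your directory):
+
+[source,yaml]
+----
+LDAP_RESTRICTED_USER_FILTER: (memberof=cn=restricted-users,ou=Users,o=example,dc=example,dc=com)
+----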
+
+
 === Administrator DN
 
 image:authentication-ldap-admin-dn.png[Administrator DN]
@@ -163,3 +208,55 @@ SUPER_USERS:
 
 Restart the Red Hat `Quay` container with the updated config.yaml file. The next time you log in, the user will have superuser privileges.
+
+[id="ldap-super-users-enabling"]
+== Enabling the LDAP_SUPERUSER_FILTER configuration field
+
+With the `LDAP_SUPERUSER_FILTER` field configured, {productname} administrators can configure Lightweight Directory Access Protocol (LDAP) users as superusers if {productname} uses LDAP as its authentication provider.
+
+Use the following procedure to enable LDAP superusers on your {productname} deployment.
+
+.Prerequisites
+
+* Your {productname} deployment uses LDAP as its authentication provider.
+* You have configured the `LDAP_USER_FILTER` field in your `config.yaml` file.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `LDAP_SUPERUSER_FILTER` parameter and add the group of users that you want configured as superusers, for example, `root`:
++
+[source,yaml]
+----
+---
+AUTHENTICATION_TYPE: LDAP
+---
+LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com
+LDAP_ADMIN_PASSWD: ABC123
+LDAP_ALLOW_INSECURE_FALLBACK: false
+LDAP_BASE_DN:
+  - o=
+  - dc=
+  - dc=com
+LDAP_EMAIL_ATTR: mail
+LDAP_UID_ATTR: uid
+LDAP_URI: ldap://.com
+LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com)
+LDAP_SUPERUSER_FILTER: (=)
+LDAP_USER_RDN:
+  - ou=
+  - o=
+  - dc=
+  - dc=com
+----
+
+. Start, or restart, your {productname} deployment.
+
+After enabling the `LDAP_SUPERUSER_FILTER` feature, your LDAP {productname} users have superuser privileges. The following options are available to superusers:
+
+* Manage users
+* Manage organizations
+* Manage service keys
+* View the change log
+* Query the usage logs
+* Create globally visible user messages
+
diff --git a/modules/proc_manage-log-storage-elasticsearch.adoc b/modules/proc_manage-log-storage-elasticsearch.adoc
new file mode 100644
index 000000000..28987ddd6
--- /dev/null
+++ b/modules/proc_manage-log-storage-elasticsearch.adoc
@@ -0,0 +1,59 @@
+[id="proc_manage-log-storage-elasticsearch"]
+= Configuring action log storage for Elasticsearch
+
+[NOTE]
+====
+To configure action log storage for Elasticsearch, you must provide your own Elasticsearch stack, as it is not included with {productname} as a customizable component.
+====
+
+Enabling Elasticsearch logging can be done during {productname} deployment
+or post-deployment using the {productname} configuration tool. The resulting
+configuration is stored in the `config.yaml` file.
+When configured, usage log access continues to be provided through the web UI
+for repositories and organizations.
+
+Use the following procedure to configure action log storage for Elasticsearch.
+
+.Procedure
+
+. Obtain an Elasticsearch account.
+. Open the {productname} Config Tool (either during or after {productname} deployment).
+. Scroll to the *Action Log Storage Configuration* setting and select
+*Elasticsearch*. The following figure shows the Elasticsearch settings
+that appear:
++
+image:elasticsearch_action_logs.png[Choose Elasticsearch to view settings to store logs]
+
+. Fill in the following information for your Elasticsearch instance:
++
+* **Elasticsearch hostname**: The hostname or IP address of the system providing
+the Elasticsearch service.
+* **Elasticsearch port**: The port number providing the Elasticsearch service on the host
+you just entered. Note that the port must be accessible from all systems
+running the {productname} registry.
The default is TCP port 9200.
+* **Elasticsearch access key**: The access key needed to gain access to the Elasticsearch
+service, if required.
+* **Elasticsearch secret key**: The secret key needed to gain access to the Elasticsearch
+service, if required.
+* **AWS region**: If you are running on AWS, set the AWS region (otherwise, leave it blank).
+* **Index prefix**: Choose a prefix to attach to log entries.
+* **Logs Producer**: Choose either Elasticsearch (default) or Kinesis to direct logs to
+an intermediate Kinesis stream on AWS. You need to set up your own pipeline to
+send logs from Kinesis to Elasticsearch (for example, Logstash). The following figure
+shows additional fields you would need to fill in for Kinesis:
++
+image:kinesis_producer.png[On AWS optionally set up an intermediate Kinesis stream]
+
+. If you chose Elasticsearch as the Logs Producer, no further configuration is needed.
+If you chose Kinesis, fill in the following:
++
+* **Stream name**: The name of the Kinesis stream.
+* **AWS access key**: The name of the AWS access key needed to gain access to the Kinesis stream, if required.
+* **AWS secret key**: The name of the AWS secret key needed to gain access to the Kinesis stream, if required.
+* **AWS region**: The AWS region.
+
+. When you are done, save the configuration. The configuration tool checks your settings.
+If there is a problem connecting to the Elasticsearch or Kinesis services,
+you will see an error and have the opportunity to continue editing. Otherwise,
+logging will begin to be directed to your Elasticsearch configuration after the
+cluster restarts with the new configuration.
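+
+If you maintain the `config.yaml` file directly, the fields above map to the `LOGS_MODEL` and `LOGS_MODEL_CONFIG` settings. The following is a sketch only; validate it against the configuration schema for your release (host name and prefix are illustrative):
+
+[source,yaml]
+----
+LOGS_MODEL: elasticsearch
+LOGS_MODEL_CONFIG:
+  elasticsearch_config:
+    host: es.example.com
+    port: 9200
+    index_prefix: logentry_
+----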
diff --git a/modules/proc_manage-log-storage-splunk.adoc b/modules/proc_manage-log-storage-splunk.adoc
new file mode 100644
index 000000000..5065b1453
--- /dev/null
+++ b/modules/proc_manage-log-storage-splunk.adoc
@@ -0,0 +1,2 @@
+[id="proc_manage-log-storage-splunk"]
+= Configuring action log storage for Splunk
\ No newline at end of file
diff --git a/modules/proc_manage-log-storage.adoc b/modules/proc_manage-log-storage.adoc
index a507f90ce..13a4a2d02 100644
--- a/modules/proc_manage-log-storage.adoc
+++ b/modules/proc_manage-log-storage.adoc
@@ -1,60 +1,4 @@
-[[proc_manage-log-storage]]
-= Configuring action log storage for Elasticsearch
+[id="proc_manage-log-storage"]
+= Configuring action log storage for Elasticsearch and Splunk
-By default, the past three months of usage logs are stored in the {productname} database
-and exposed via the web UI on organization and repository levels. Appropriate administrative
-privileges are required to see log entries. For deployments with a large amount of logged operations, you can now store
-the usage logs in Elasticsearch instead of the {productname} database backend.
-To do this, you need to provide your own Elasticsearch stack, as it is not included with
-{productname} as a customizable component.
-
-Enabling Elasticsearch logging can be done during {productname} deployment
-or post-deployment using the {productname} Config Tool. The resulting
-configuration is stored in the `config.yaml` file.
-Once configured, usage log access continues to be provided the same way, via the web UI
-for repositories and organizations.
-
-Here's how to configure action log storage to change it from the default
-{productname} database to use Elasticsearch:
-
-. Obtain an Elasticsearch account.
-. Open the {productname} Config Tool (either during or after {productname} deployment).
-. Scroll to the _Action Log Storage Configuration_ setting and select
-_Elasticsearch_ instead of _Database_. The following figure shows the Elasticsearch settings
-that appear:
-+
-image:elasticsearch_action_logs.png[Choose Elasticsearch to view settings to store logs]
-
-. Fill in the following information for your Elasticsearch instance:
-+
-* **Elasticsearch hostname**: The hostname or IP address of the system providing
-the Elasticsearch service.
-* **Elasticsearch port**: The port number providing the Elasticsearch service on the host
-you just entered. Note that the port must be accessible from all systems
-running the {productname} registry. The default is TCP port 9200.
-* **Elasticsearch access key**: The access key needed to gain access to the Elastic search
-service, if required.
-* **Elasticsearch secret key**: The secret key needed to gain access to the Elastic search
-service, if required.
-* **AWS region**: If you are running on AWS, set the AWS region (otherwise, leave it blank).
-* **Index prefix**: Choose a prefix to attach to log entries.
-* **Logs Producer**: Choose either Elasticsearch (default) or Kinesis to direct logs to
-an intermediate Kinesis stream on AWS. You need to set up your own pipeline to
-send logs from Kinesis to Elasticsearch (for example, Logstash). The following figure
-shows additional fields you would need to fill in for Kinesis:
-+
-image:kinesis_producer.png[On AWS optionally set up an intermediate Kinesis stream]
-
-. If you chose Elasticsearch as the Logs Producer, no further configuration is needed.
-If you chose Kinesis, fill in the following:
-+
-* **Stream name**: The name of the Kinesis stream.
-* **AWS access key**: The name of the AWS access key needed to gain access to the Kinesis stream, if required.
-* **AWS secret key**: The name of the AWS secret key needed to gain access to the Kinesis stream, if required.
-* **AWS region**: The AWS region.
-
-. When you are done, save the configuration. The Config Tool checks your settings.
-If there is a problem connecting to the Elasticsearch or Kinesis services,
-you will see an error and have the opportunity to continue editing. Otherwise,
-logging will begin to be directed to your Elasticsearch configuration after the
-cluster restarts with the new configuration.
+By default, the previous three months of usage logs are stored in the {productname} database and exposed through the web UI on organization and repository levels. Appropriate administrative privileges are required to see log entries. For deployments with a large amount of logged operations, you can store the usage logs in Elasticsearch or Splunk instead of the {productname} database backend.
\ No newline at end of file
diff --git a/modules/proc_manage-quay-prometheus.adoc b/modules/proc_manage-quay-prometheus.adoc
index ddf67d583..76f5fa3eb 100644
--- a/modules/proc_manage-quay-prometheus.adoc
+++ b/modules/proc_manage-quay-prometheus.adoc
@@ -8,18 +8,18 @@ endpoint on each instance to allow for easy monitoring and alerting.
 [[exposing-the-prometheus-endpoint]]
 == Exposing the Prometheus endpoint
 
-=== Standalone {productname} 
+=== Standalone {productname}
 
-When using `podman run` to start the `Quay` container, expose the metrics port `9091`: 
+When using `podman run` to start the `Quay` container, expose the metrics port `9091`:
 
 [subs="verbatim,attributes"]
-....
+----
 $ sudo podman run -d --rm -p 80:8080 -p 443:8443 -p 9091:9091 \
    --name=quay \
    -v $QUAY/config:/conf/stack:Z \
    -v $QUAY/storage:/datastorage:Z \
    {productrepo}/{quayimage}:{productminv}
-....
+----
 
 The metrics will now be available:
 
diff --git a/modules/proc_manage-security-scanning.adoc b/modules/proc_manage-security-scanning.adoc
index 20232dd66..22c3dbce1 100644
--- a/modules/proc_manage-security-scanning.adoc
+++ b/modules/proc_manage-security-scanning.adoc
@@ -7,7 +7,7 @@ This document explains how to configure Clair with {productname}.
 [NOTE]
 ====
-With the release of Red Hat Quay 3.4, the default version of Clair is V4. This new version V4 is no longer being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] and is supported for production use. Customers are strongly encouraged to use Clair V4 for with Red Hat Quay 3.4. It is possible to run both Clair V4 and Clair V2 simultaneously if so desired. In future versions of Red Hat Quay, Clair V2 will eventually be removed.
+With the release of {productname} 3.4, the default version of Clair is V4. This new version is no longer being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] and is supported for production use. Customers are strongly encouraged to use Clair V4 with {productname} 3.4. It is possible to run both Clair V4 and Clair V2 simultaneously if so desired. In future versions of {productname}, Clair V2 will eventually be removed.
 ====
 
 == Set up Clair V2 in the {productname} config tool
diff --git a/modules/proc_manage-upgrade-quay-guide.adoc b/modules/proc_manage-upgrade-quay-guide.adoc
index 551e991a8..657882881 100644
--- a/modules/proc_manage-upgrade-quay-guide.adoc
+++ b/modules/proc_manage-upgrade-quay-guide.adoc
@@ -9,7 +9,7 @@ This document describes how to upgrade one or more `Quay` containers.
 
 The database is the "source of truth" for Quay, and some version upgrades will trigger a schema update and data migration. Such versions are clearly documented in the
-https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[Red Hat Quay Release Notes].
+https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[{productname} Release Notes].
 
 Backup the database before upgrading Quay. Once the backup completes, use the procedure in this document to stop the running `Quay` container, start the new container, and check the health of
 the upgraded Quay service.
 
@@ -25,7 +25,7 @@ the upgraded Quay service.
 [[pull-the-latest-quay-enterprise-release-from-the-repository]]
 == Pull the latest Quay release from the repository.
 
-Check the https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-single/red_hat_quay_release_notes/[list of Red Hat Quay releases] for the latest version.
+Check the https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-single/red_hat_quay_release_notes/[list of {productname} releases] for the latest version.
``` # docker pull quay.io/coreos/registry:RELEASE_VERSION diff --git a/modules/proc_manage-upgrade-quay.adoc b/modules/proc_manage-upgrade-quay.adoc index 93ba7c405..c64fa5ad5 100644 --- a/modules/proc_manage-upgrade-quay.adoc +++ b/modules/proc_manage-upgrade-quay.adoc @@ -2,7 +2,7 @@ = Upgrading Quay The full list of Quay versions can be found on the -https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[Red Hat Quay Release Notes] +https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[{productname} Release Notes] page. [[special-note-upgrading-from-quay-enterprise-2.0.0-to-2.0.0]] @@ -28,7 +28,7 @@ release. [[the-upgrade-process]] == The upgrade process -. Visit the https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[Red Hat Quay Release Notes] page and note the latest version of Quay. +. Visit the https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[{productname} Release Notes] page and note the latest version of Quay. . Shutdown the Quay cluster: Remove *all* containers from service. . On a *single* node, run the newer version of Quay. diff --git a/modules/proc_quay-bridge-operator.adoc b/modules/proc_quay-bridge-operator.adoc deleted file mode 100644 index 034f82491..000000000 --- a/modules/proc_quay-bridge-operator.adoc +++ /dev/null @@ -1,110 +0,0 @@ -[[quay-bridge-operator]] -= Integrate {productname} into OpenShift with the Bridge Operator - -Using the Quay Bridge Operator, you can replace the integrated container -registry in OpenShift with a {productname} registry. By doing this, your -integrated OpenShift registry becomes a highly available, enterprise-grade -{productname} registry with enhanced role based access control (RBAC) features. - -The primary goals of the Bridge Operator is to duplicate the features of the -integrated OpenShift registry in the new {productname} registry. The features -enabled with this Operator include: - -* Synchronizing OpenShift namespaces as {productname} organizations. - - Creating Robot accounts for each default namespace service account - - Creating Secrets for each created Robot Account (associating each -Robot Secret to a Service Account as Mountable and Image Pull Secret) - - Synchronizing OpenShift ImageStreams as Quay Repositories -* Automatically rewriting new Builds making use of ImageStreams to output to {productname} -* Automatically importing an ImageStream tag once a build completes - -Using this procedure with the Quay Bridge Operator, you enable bi-directional communication between your {productname} and OpenShift clusters. - - -== Running the Quay Bridge Operator - -=== Prerequisites - -Before setting up the Bridge Operator, have the following in place: - -* An existing {productname} environment for which you have superuser permissions -* A Red Hat OpenShift Container Platform environment (4.2 or later is recommended) -for which you have cluster administrator permissions -* An OpenShift command line tool (`oc` command) - -=== Setting up and configuring OpenShift and {productname} - -Both {productname} and OpenShift configuration is required: - -=== {productname} setup - -Create a dedicated {productname} organization, and from a new application -you create within that organization, generate an OAuth token -to be used with the Quay Bridge Operator in OpenShift - -. 
Log in to {productname} as a user with superuser access and select the -organization for which the external application will be configured. -. In the left navigation, select Applications. -. Select `Create New Application` and entering a name for the new application (for example, `openshift`). -. With the new application displayed, select it. -. In the left navigation, select `Generate Token` to create a new OAuth2 token. -. Select all checkboxes to grant the access needed for the integration. -. Review the assigned permissions and then select `Authorize Application`, then confirm it. -. Copy and save the generated Access Token that appears to use in the next section. - -=== OpenShift Setup -Setting up OpenShift for the Quay Bridge Operator requires several steps, including: - -==== Deploying the Operator -The fastest method for deploying the operator is to deploy from OperatorHub. From the Administrator perspective in the OpenShift Web Console, navigate to the Operators tab, and then select OperatorHub. - -Search for Quay Bridge Operator and then select Install. - -Select an Approval Strategy and then select Install which will deploy the operator to the cluster. - - - -==== Creating an OpenShift secret for the OAuth token - -The Operator will use the previously obtained Access Token to communicate with Quay. Store this token within OpenShift as a secret. - -Execute the following command to create a secret called `quay-integration` in the `openshift-operators` namespace with a key called `token` containing the access token: - -[source,bash] ----- -$ oc create secret -n openshift-operators generic quay-integration --from-literal=token= ----- - -==== Create the QuayIntegration Custom Resource - -Finally, to complete the integration between OpenShift and Quay, a `QuayIntegration` custom resource needs to be created. This can be completed in the Web Console or from the command line. - -.quay-integration.yaml -[source,yaml] ----- -apiVersion: quay.redhat.com/v1 -kind: QuayIntegration -metadata: - name: example-quayintegration -spec: - clusterID: openshift <1> - credentialsSecret: - namespace: openshift-operators - name: quay-integration<2> - quayHostname: https:// <3> - insecureRegistry: false <4> ----- -<1> The clusterID value should be unique across the entire ecosystem. This value is optional and defaults to `openshift`. -<2> The `credentialsSecret` property refers to the namespace and name of the secret containing the token that was previously created. -<3> Replace QUAY_URL with the hostname of your {productname} instance. -<4> If Quay is using self signed certificates, set the property `insecureRegistry: true`. - - -Create the `QuayIntegration` Custom Resource: - -[source,bash] ----- -$ oc create -f quay-integration.yaml ----- - -At this point a Quay integration resource is created, linking the OpenShift cluster to the {productname} instance. Organizations within Quay should be created for the related namespaces from the OpenShift environment diff --git a/modules/proc_setting-up-quay-for-qbo.adoc b/modules/proc_setting-up-quay-for-qbo.adoc new file mode 100644 index 000000000..cf6509183 --- /dev/null +++ b/modules/proc_setting-up-quay-for-qbo.adoc @@ -0,0 +1,40 @@ +:_content-type: PROCEDURE +[id="setting-up-quay-for-qbo"] += Setting up {productname} for the {qbo} + +In this procedure, you will create a dedicated {productname} organization, and from a new application created within that organization you will generate an OAuth token to be used with the {qbo} in {ocp}. + +.Procedure + +. 
Log in to {productname} through the web UI. + +. Select the organization for which the external application will be configured. + +. On the navigation pane, select *Applications*. + +. Select *Create New Application* and enter a name for the new application, for example, `openshift`. + +. On the *OAuth Applications* page, select your application, for example, `openshift`. + +. On the navigation pane, select *Generate Token*. + +. Select the following fields: ++ +* *Administer Organization* +* *Administer Repositories* +* *Create Repositories* +* *View all visible repositories* +* *Read/Write to any accessible repositories* +* *Administer User* +* *Read User Information* + +. Review the assigned permissions. + +. Select *Authorize Application* and then confirm the authorization by selecting *Authorize Application*. + +. Save the generated access token. ++ +[IMPORTANT] +==== +As of {productname} 3.7, there is no token management. You cannot list tokens, delete tokens, or modify tokens. The generated access token is only shown once and cannot be re-obtained after closing the page. +==== diff --git a/modules/proc_upgrade_standalone.adoc b/modules/proc_upgrade_standalone.adoc index 4a8e8c153..0d9fe94f7 100644 --- a/modules/proc_upgrade_standalone.adoc +++ b/modules/proc_upgrade_standalone.adoc @@ -1,6 +1,6 @@ = Standalone upgrade -In general, {productname} supports upgrades from a prior (N-1) minor version only. For example, upgrading directly from {productname} 3.0.5 to the latest version of 3.5 is not supported. Instead, users would have to upgrade as follows: +In general, {productname} supports upgrades from a prior (N-1) minor version only. For example, upgrading directly from {productname} 3.0.5 to the latest version of 3.5 is not supported. Instead, users would have to upgrade as follows: . 3.0.5 -> 3.1.3 . 3.1.3 -> 3.2.2 @@ -10,29 +10,38 @@ In general, {productname} supports upgrades from a prior (N-1) minor version onl This is required to ensure that any necessary database migrations are done correctly and in the right order during the upgrade. +In some cases, {productname} supports direct, single-step upgrades from prior (N-2, N-3) minor versions. This exception to the normal prior-minor-version-only upgrade path simplifies the upgrade procedure for customers on older releases. The following upgrade paths are supported: -// TODO 36 Expand on standalone upgrade from 33->36, 34->36 -[NOTE] -==== -{productname} 3.6 supports direct, single-step upgrade from 3.3.z and from 3.4.z. This exception to the normal, prior minor version-only, upgrade simplifies the upgrade procedure for customers on older releases. -==== +. 3.3.z -> 3.6.z +. 3.4.z -> 3.6.z +. 3.4.z -> 3.7.z +. 3.5.z -> 3.7.z +. 3.7.z -> 3.8.z -For users wanting to upgrade via the Quay Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrading_quay_by_upgrading_the_quay_operator[Upgrading Quay by upgrading the Quay Operator]. +For users wanting to upgrade via the Quay Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrading_quay_by_upgrading_the_quay_operator[Upgrading Quay by upgrading the Quay Operator]. -This document describes the steps needed to perform each individual upgrade. Determine your current version and then follow the steps in sequential order, starting with your current version and working up to your desired target version. 
+This document describes the steps needed to perform each individual upgrade. Determine your current version and then follow the steps in sequential order, starting with your current version and working up to your desired target version. + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_8_z_from_3_7_z[Upgrade to 3.8.z from 3.7.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_6_z[Upgrade to 3.7.z from 3.6.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_5_z[Upgrade to 3.7.z from 3.5.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_4_z[Upgrade to 3.7.z from 3.4.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_3_z[Upgrade to 3.7.z from 3.3.z] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_5_z[Upgrade to 3.6.z from 3.5.z] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_4_z[Upgrade to 3.6.z from 3.4.z] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_3_z[Upgrade to 3.6.z from 3.3.z] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_5_7_from_3_4_z[Upgrade to 3.5.z from 3.4.z] +ifdef::downstream[] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_4_6_from_3_3_z[Upgrade to 3.4.z from 3.3.4] -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_3_4_from_3_2_z[Upgrade to 3.3.4 from 3.2.2] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_3_4_from_3_2_z[Upgrade to 3.3.4 from 3.2.2] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_2_2_from_3_1_z[Upgrade to 3.2.2 from 3.1.3] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_1_3_from_3_0_z[Upgrade to 3.1.3 from 3.0.5] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_0_5_from_2_9_5[Upgrade to 3.0.5 from 2.9.5] +endif::downstream[] -See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes/index[Red Hat Quay Release Notes] for information on features for individual releases. +See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes/index[{productname} Release Notes] for information on features for individual releases. 
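+Before starting, confirm the version of {productname} that is currently running. The following is a minimal, illustrative sketch only, not part of the official procedure: it assumes a standalone deployment where the container is named `quay`, and inspects the tag of the running image: + +[source,terminal] +---- +$ sudo podman ps --filter name=quay --format "{{.Image}}" +---- 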
The general procedure for a manual upgrade consists of the following steps: @@ -44,68 +53,180 @@ The general procedure for a manual upgrade consists of the following steps: == Accessing images -Images for Quay 3.4.0 and later are available from link:https://registry.redhat.io[registry.redhat.io] and +Images for Quay 3.4.0 and later are available from link:https://registry.redhat.io[registry.redhat.io] and link:https://registry.access.redhat.com[registry.access.redhat.com], with authentication set up as described in link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]. -Images for Quay 3.3.4 and earlier are available from link:https://quay.io[quay.io], with authentication set up as described in link:https://access.redhat.com/solutions/3533201[Accessing Red Hat Quay without a CoreOS login]. +Images for Quay 3.3.4 and earlier are available from link:https://quay.io[quay.io], with authentication set up as described in link:https://access.redhat.com/solutions/3533201[Accessing {productname} without a CoreOS login]. -== Upgrade to 3.6.z from 3.5.z +== Upgrade to 3.8.z from 3.7.z === Target images * **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] * **Clair:** {productrepo}/{clairimage}:{productminv} -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 + +== Upgrade to 3.7.z from 3.6.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.7.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.7.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 + +== Upgrade to 3.7.z from 3.5.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.7.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.7.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 + +== Upgrade to 3.7.z from 3.4.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.7.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.7.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 + +== Upgrade to 3.7.z from 3.3.z + +Upgrading to {productname} 3.7 from 3.3 is unsupported. Users must first upgrade to 3.6 from 3.3, and then upgrade to 3.7. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_3_z[Upgrade to 3.6.z from 3.3.z]. 
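+Each supported upgrade section below follows the same stop, pull, and restart pattern. The following is a minimal sketch only, assuming a standalone deployment where the container is named `quay` and reuses the existing config and storage volumes; substitute the target image listed in the relevant section: + +[source,terminal] +---- +$ sudo podman stop quay +$ sudo podman pull {productrepo}/{quayimage}:{productminv} +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- 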
+ +== Upgrade to 3.6.z from 3.5.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.6.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.6.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 == Upgrade to 3.6.z from 3.4.z + +[NOTE] +==== +{productname} 3.6 supports direct, single-step upgrade from 3.4.z. This exception to the normal prior-minor-version-only upgrade path simplifies the upgrade procedure for customers on older releases. +==== Upgrading to {productname} 3.6 from 3.4.z requires a database migration which does not support downgrading back to a prior version of {productname}. Please back up your database before performing this migration. -Users will also need to configure a completely new Clair v4 instance to replace the old Clair v2 when upgrading from 3.4.z. For instructions on configuring Clair v4, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-standalone[Setting up Clair on a non-OpenShift Red Hat Quay deployment]. +Users will also need to configure a completely new Clair v4 instance to replace the old Clair v2 when upgrading from 3.4.z. For instructions on configuring Clair v4, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-standalone[Setting up Clair on a non-OpenShift {productname} deployment]. === Target images -* **Quay:** {productrepo}/{quayimage}:{productminv} -* **Clair:** {productrepo}/{clairimage}:{productminv} -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **Quay:** {productrepo}/{quayimage}:v3.6.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.6.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 == Upgrade to 3.6.z from 3.3.z + +[NOTE] ==== +{productname} 3.6 supports direct, single-step upgrade from 3.3.z. This exception to the normal prior-minor-version-only upgrade path simplifies the upgrade procedure for customers on older releases. +==== Upgrading to {productname} 3.6.z from 3.3.z requires a database migration which does not support downgrading back to a prior version of {productname}. Please back up your database before performing this migration. -Users will also need to configure a completely new Clair v4 instance to replace the old Clair v2 when upgrading from 3.3.z. For instructions on configuring Clair v4, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-standalone[Setting up Clair on a non-OpenShift Red Hat Quay deployment]. +Users will also need to configure a completely new Clair v4 instance to replace the old Clair v2 when upgrading from 3.3.z. For instructions on configuring Clair v4, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-standalone[Setting up Clair on a non-OpenShift {productname} deployment]. 
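+As noted above, back up your database before performing the migration. For example, if your deployment uses PostgreSQL, a backup might be taken with `pg_dump`. This is a sketch only; the host, user, and database names shown are assumptions that must be replaced with the values from your own deployment: + +[source,terminal] +---- +$ pg_dump -h quay-database-host -U quayuser -d quay > quay-backup.sql +---- 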
=== Target images -* **Quay:** {productrepo}/{quayimage}:{productminv} -* **Clair:** {productrepo}/{clairimage}:{productminv} -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **Quay:** {productrepo}/{quayimage}:v3.6.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.6.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 + +=== Swift configuration when upgrading from 3.3.z to 3.6 + +When upgrading from {productname} 3.3.z to 3.6.z, some users might receive the following error: `Switch auth v3 requires tenant_id (string) in os_options`. As a workaround, you can manually update your `DISTRIBUTED_STORAGE_CONFIG` to add the `os_options` and `tenant_id` parameters: + +[source,yaml] +---- + DISTRIBUTED_STORAGE_CONFIG: + brscale: + - SwiftStorage + - auth_url: http://****/v3 + auth_version: "3" + os_options: + tenant_id: **** + project_name: ocp-base + user_domain_name: Default + storage_path: /datastorage/registry + swift_container: ocp-svc-quay-ha + swift_password: ***** + swift_user: ***** +---- == Upgrade to 3.5.7 from 3.4.z === Target images +ifdef::downstream[] * **Quay:** {productrepo}/{quayimage}:v3.5.7 -* **Clair:** {productrepo}/{clairimage}:v3.5.7 -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **Clair:** {productrepo}/{clairimage}:{productminv} +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 +endif::downstream[] +ifdef::upstream[] +* **Quay:** {productrepo}/{quayimage}:v3.5.1 +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 +endif::upstream[] -== Upgrade to 3.4.6 from 3.3.z + +ifdef::downstream[] +== Upgrade to 3.4.6 from 3.3.z Upgrading to Quay 3.4 requires a database migration which does not support downgrading back to a prior version of Quay. Please back up your database before performing this migration. === Target images * **Quay:** {productrepo}/{quayimage}:v3.4.6 -* **Clair:** {productrepo}/{clairimage}:v3.4.6 -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **Clair:** {productrepo}/{clairimage}:{productminv} +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 -== Upgrade to 3.3.4 from 3.2.z +== Upgrade to 3.3.4 from 3.2.z === Target images -* **Quay:** quay.io/redhat/quay:v3.3.4 -* **Clair:** quay.io/redhat/clair-jwt:v3.3.4 +* **Quay:** quay.io/redhat/quay:v3.3.4 +* **Clair:** {productrepo}/{clairimage}:{productminv} * **PostgreSQL:** rhscl/postgresql-96-rhel7 * **Redis:** registry.access.redhat.com/rhscl/redis-32-rhel7 @@ -150,8 +271,8 @@ on all nodes and bring up the new quay 3.2.2 service on those nodes. === Target images -* **Quay:** quay.io/redhat/quay:v3.2.2 -* **Clair:** quay.io/redhat/clair-jwt:v3.2.2 +* **Quay:** quay.io/redhat/quay:v3.2.2 +* **Clair:** {productrepo}/{clairimage}:{productminv} * **PostgreSQL:** rhscl/postgresql-96-rhel7 * **Redis:** registry.access.redhat.com/rhscl/redis-32-rhel7 @@ -161,8 +282,8 @@ 
== Upgrade to 3.1.3 from 3.0.z === Target images -* **Quay:** quay.io/redhat/quay:v3.1.3 -* **Clair:** quay.io/redhat/clair-jwt:v3.1.3 +* **Quay:** quay.io/redhat/quay:v3.1.3 +* **Clair:** {productrepo}/{clairimage}:{productminv} * **PostgreSQL:** rhscl/postgresql-96-rhel7 * **Redis:** registry.access.redhat.com/rhscl/redis-32-rhel7 @@ -171,9 +292,9 @@ on all nodes and bring up the new quay 3.2.2 service on those nodes. For the 2.9.5 to 3.0.5 upgrade, you can either do the whole upgrade with {productname} down (synchronous upgrade) or only bring down {productname} for a few minutes and have the bulk of the upgrade continue with {productname} running (background upgrade). -A background upgrade could take longer to run the upgrade depending on how many tags need to be processed. However, there is less total downtime. The downside of a background upgrade is that you will not have access to the latest features until the upgrade completes. The cluster runs from the Quay v3 container in v2 compatibility mode until the upgrade is complete. - +A background upgrade could take longer depending on how many tags need to be processed. However, there is less total downtime. The downside of a background upgrade is that you will not have access to the latest features until the upgrade completes. The cluster runs from the Quay v3 container in v2 compatibility mode until the upgrade is complete. include::con_upgrade_v3.adoc[leveloffset=+2] include::proc_upgrade_v3.adoc[leveloffset=+2] +endif::downstream[] diff --git a/modules/proc_use-api.adoc b/modules/proc_use-api.adoc index 8ef72cefc..0b87b8a77 100644 --- a/modules/proc_use-api.adoc +++ b/modules/proc_use-api.adoc @@ -156,7 +156,7 @@ $ curl -H "Content-Type: application/json" -H "Authorization: Bearer Fava2kV9C9 ** The returned content includes a generated password for the new user account: + [source,json] ----- +---- { "username": "quaysuper", "email": "quaysuper@example.com", @@ -389,7 +389,7 @@ $ curl -X GET -k -H "Authorization: Bearer qz9NZ2Np1f55CSZ3RVOvxjeUdkzYuCp0pKggA To enable directory synchronization for the team `newteam` in organization `testadminorg`, where the corresponding group name in LDAP is `ldapgroup`: ``` -$ curl -X POST -H "Authorization: Bearer 9rJYBR3v3pXcj5XqIA2XX6Thkwk4gld4TCYLLWDF" \ +$ curl -X POST -H "Authorization: Bearer 9rJYBR3v3pXcj5XqIA2XX6Thkwk4gld4TCYLLWDF" \ -H "Content-type: application/json" \ -d '{"group_dn": "cn=ldapgroup,ou=Users"}' \ http://quay1-server:8080/api/v1/organization/testadminorg/team/newteam/syncing @@ -461,6 +461,27 @@ print(r.text) $ curl -X POST https://quay.io/api/v1/repository \ -H 'Authorization: Bearer {token}' \ -H 'Content-Type: application/json' \ - -d '{"namespace":"yournamespace", "repository":"yourreponame", + -d '{"namespace":"yournamespace", "repository":"yourreponame", "description":"descriptionofyourrepo", "visibility": "private"}' | jq ``` + +[id="api-create-mirrored-repo"] +=== Create a mirrored repository + +.Minimal configuration +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer ${bearer_token}" \ + -H "Content-Type: application/json" \ + --data '{"external_reference": "quay.io/minio/mc", "external_registry_username": "", "sync_interval": 600, "sync_start_date": "2021-08-06T11:11:39Z", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": [ "latest" ]}, "robot_username": "orga+robot"}' https://${quay_registry}/api/v1/repository/${orga}/${repo}/mirror | jq +---- + +.Extended configuration +[source,terminal] +---- +$ curl -X POST \ + -H 
"Authorization: Bearer ${bearer_token}" + -H "Content-Type: application/json" + --data '{"is_enabled": true, "external_reference": "quay.io/minio/mc", "external_registry_username": "username", "external_registry_password": "password", "external_registry_config": {"unsigned_images":true, "verify_tls": false, "proxy": {"http_proxy": "http://proxy.tld", "https_proxy": "https://proxy.tld", "no_proxy": "domain"}}, "sync_interval": 600, "sync_start_date": "2021-08-06T11:11:39Z", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": [ "*" ]}, "robot_username": "orga+robot"}' https://${quay_registry}/api/v1/repository/${orga}/${repo}/mirror | jq +---- \ No newline at end of file diff --git a/modules/proc_use-quay-build-workers-dockerfiles.adoc b/modules/proc_use-quay-build-workers-dockerfiles.adoc index 93fa52aa9..47ebaa8d0 100644 --- a/modules/proc_use-quay-build-workers-dockerfiles.adoc +++ b/modules/proc_use-quay-build-workers-dockerfiles.adoc @@ -11,7 +11,7 @@ The {productname} Build system is designed for scalability (since it is used to ifdef::downstream[] [NOTE] ==== -The upstream version of Red Hat Quay provides instructions on how to configure an AWS/EC2 based Executor. This configuration is not supported for Red Hat Quay customers. +The upstream version of {productname} provides instructions on how to configure an AWS/EC2 based Executor. This configuration is not supported for {productname} customers. ==== endif::downstream[] @@ -41,11 +41,11 @@ The {productname} builds need access to a Redis instance to track build status i === OpenShift TLS component -The {productname} 3.6 Operator has introduced the `tls` component which allows you to control TLS configuration. +The `tls` component allows you to control TLS configuration. [NOTE] ==== -{productname} 3.6 does not support builders when the TLS component is managed by the Operator. +{productname} 3.7 does not support builders when the TLS component is managed by the Operator. ==== If you set `tls` to `unmanaged`, you supply your own `ssl.cert` and `ssl.key` files. In this instance, if you want your cluster to support builders, you must add both the Quay route and the builder route name to the SAN list in the cert, or alternatively use a wildcard. To add the builder route, use the following format: @@ -178,7 +178,7 @@ BUILD_MANAGER: - EXECUTOR: kubernetes BUILDER_NAMESPACE: builder K8S_API_SERVER: api.openshift.somehost.org:6443 - K8S_API_TLS_CA: /conf/stack/extra_ca_cert_build_cluster.crt + K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build_cluster.crt VOLUME_SIZE: 8G KUBERNETES_DISTRIBUTION: openshift CONTAINER_MEMORY_LIMITS: 5120Mi @@ -248,7 +248,7 @@ ifdef::downstream[] (`registry.redhat.io/quay/quay-builder-qemu-rhcos:v3.4.0`). endif::downstream[] SETUP_TIME:: Specifies the number of seconds at which a build times out if it has not yet registered itself with the Build Manager (default is 500 seconds). Builds that time out are attempted to be restarted three times. If the build does not register itself after three attempts it is considered failed. -MINIMUM_RETRY_THRESHOLD:: This setting is used with multiple Executors; it indicates how many retries are attempted to start a build before a different Executor is chosen. Setting to 0 means there are no restrictions on how many tries the build job needs to have. This value should be kept intentionally small (three or less) to ensure failovers happen quickly in the event of infrastructure failures. 
+MINIMUM_RETRY_THRESHOLD:: This setting is used with multiple Executors; it indicates how many retries are attempted to start a build before a different Executor is chosen. Setting to 0 means there are no restrictions on how many tries the build job needs to have. This value should be kept intentionally small (three or less) to ensure failovers happen quickly in the event of infrastructure failures. You must specify a value for this setting. For example, suppose Kubernetes is set as the first executor and EC2 as the second executor. If you want the last attempt to run a job to always be executed on EC2 and not Kubernetes, set the Kubernetes executor's `MINIMUM_RETRY_THRESHOLD` to 1 and EC2's `MINIMUM_RETRY_THRESHOLD` to 0 (the default when not set). In this case, the Kubernetes executor's `MINIMUM_RETRY_THRESHOLD` > retries_remaining(1) evaluates to False, falling back to the second configured executor. SSH_AUTHORIZED_KEYS:: List of ssh keys to bootstrap in the ignition config. This allows other keys to be used to ssh into the EC2 instance or QEMU VM @@ -317,6 +317,7 @@ BUILD_MANAGER: - ephemeral - ALLOWED_WORKER_COUNT: 1 ORCHESTRATOR_PREFIX: buildman/production/ + JOB_REGISTRATION_TIMEOUT: 600 ORCHESTRATOR: REDIS_HOST: quay-redis-host REDIS_PASSWORD: quay-redis-password diff --git a/modules/proxy-cache-arch.adoc b/modules/proxy-cache-arch.adoc index 3dbbb5a7a..701ccd781 100644 --- a/modules/proxy-cache-arch.adoc +++ b/modules/proxy-cache-arch.adoc @@ -16,9 +16,9 @@ image:updated-layers-in-cache.png[Updating opposing layers overview] If the upstream image and cached version are the same, no layers are pulled and the cached image is delivered to the user. -In some cases, users initiate pulls when the upstream registry is down. If this happens with the configured staleness period, the image stored in cache is delivered. If the pull happens after the configured staleness period, the error is propagated to the user. The following image depicts an architectural overview when a pull happens after the configured staleness period: +In some cases, users initiate pulls when the upstream registry is down. If this happens within the configured staleness period, the image stored in cache is delivered. If the pull happens after the configured staleness period, the error is propagated to the user. The following image depicts an architectural overview when a pull happens after the configured staleness period: -image: cache-proxy-staleness-pull.png[Staleness pull overview] +image:cache-proxy-staleness-pull.png[Staleness pull overview] Quay administrators can leverage the configurable size limit of an organization to limit cache size so that backend storage consumption remains predictable. This is achieved by discarding images from the cache according to the frequency with which an image is used. The following image depicts an architectural overview of this scenario: diff --git a/modules/proxy-cache-leveraging-storage-quota-limits.adoc b/modules/proxy-cache-leveraging-storage-quota-limits.adoc new file mode 100644 index 000000000..c10002471 --- /dev/null +++ b/modules/proxy-cache-leveraging-storage-quota-limits.adoc @@ -0,0 +1,51 @@ +:_content-type: CONCEPT +[id="proxy-cache-leveraging-storage-quota-limits"] +== Leveraging storage quota limits in proxy organizations + +With {productname} 3.8, the proxy cache feature has been enhanced with auto-pruning of tagged images. The auto-pruning of image tags is only available when a proxied namespace has quota limitations configured. 
Currently, if an image's size is greater than the organization's quota, the image is not uploaded until an administrator creates the necessary space. Now, when an image is pushed that exceeds the allotted space, the auto-pruning enhancement marks the least recently used tags for deletion. As a result, the new image tag is stored, while the least used image tag is marked for deletion. + +[IMPORTANT] +==== +* As part of the auto-pruning feature, the tags that are marked for deletion are eventually garbage collected by the garbage collector (gc) worker process. As a result, the quota size restriction is not fully enforced during this period. +* Currently, the namespace quota size computation does not take into account the size of manifest children. This is a known issue and will be fixed in a future version of {productname}. +==== + +=== Testing the storage quota limits feature in proxy organizations + +Use the following procedure to test the auto-pruning feature of an organization with proxy cache and storage quota limitations enabled. + +.Prerequisites + +* Your organization is configured to serve as a proxy organization. The following example proxies from quay.io. + +* `FEATURE_PROXY_CACHE` is set to `true` in your `config.yaml` file. + +* `FEATURE_QUOTA_MANAGEMENT` is set to `true` in your `config.yaml` file. + +* Your organization is configured with a quota limit, for example, `150 MB`. + +.Procedure + +. Pull an image to your repository from your proxy organization, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/quay:3.7.9 +---- + +. Depending on the space left in your repository, you might need to pull additional images from your proxy organization, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/quay:3.6.2 +---- + +. In the {productname} registry UI, click the name of your repository. + +. Click *Tags* in the navigation pane and ensure that `quay:3.7.9` and `quay:3.6.2` are tagged. + +. Pull the last image that will result in your repository exceeding the allotted quota, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/quay:3.5.1 +---- + +. Refresh the *Tags* page of your {productname} registry. The first image that you pushed, for example, `quay:3.7.9`, should have been auto-pruned. The *Tags* page should now show `quay:3.6.2` and `quay:3.5.1`. \ No newline at end of file diff --git a/modules/proxy-cache-procedure.adoc b/modules/proxy-cache-procedure.adoc index 105ab88b7..ae991ddc6 100644 --- a/modules/proxy-cache-procedure.adoc +++ b/modules/proxy-cache-procedure.adoc @@ -7,12 +7,14 @@ The following procedure describes how you can use {productname} to proxy a remot .Prerequisites * `FEATURE_PROXY_CACHE` in your config.yaml is set to `true`. +* You have been assigned the *Member* team role. For more information about team roles, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/user-org-intro[Users and organizations in {productname}]. + .Procedure . In your Quay organization on the UI, for example, `cache-quayio`, click *Organization Settings* on the left hand pane. -. Optional: Click *Add Storage Quota* to configure quota management for your organization. For more information about quota management, see link:insert_link_here[Quota Management]. +. Optional: Click *Add Storage Quota* to configure quota management for your organization. 
For more information about quota management, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[Quota Management]. + [NOTE] ==== @@ -38,7 +40,7 @@ If you do not set a *Remote Registry Username* and *Remote Registry Password*, y [NOTE] ==== * The default tag *Expiration* field for cached images in a proxy organization is set to 86400 seconds. In the proxy organization, the tag expiration is refreshed to the value set in the UI's *Expiration* field every time the tag is pulled. This feature is different from Quay's default link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/index#tag-expiration[individual tag expiration] feature. In a proxy organization, it is possible to override the individual tag feature. When this happens, the individual tag's expiration is reset according to the *Expiration* field of the proxy organization. -* Expired images will disappear after the allotted time, but are still stored in Quay. The time in which an image is completely deleted, or garbage collected, depends on the *Time Machine* setting of your organization. The default time for garbage collection is 14 days unless otherwise specified. +* Expired images will disappear after the allotted time, but are still stored in Quay. The time in which an image is completely deleted, or garbage collected, depends on the *Time Machine* setting of your organization. The default time for garbage collection is 14 days unless otherwise specified. ==== . Click *Save*. diff --git a/modules/public-cloud-aws.adoc b/modules/public-cloud-aws.adoc index c0c79ba34..2f7084122 100644 --- a/modules/public-cloud-aws.adoc +++ b/modules/public-cloud-aws.adoc @@ -1,13 +1,17 @@ -= Running Red Hat Quay on AWS +:_content-type: CONCEPT +[id="arch-quay-on-aws"] += Running {productname} on Amazon Web Services -image:178_Quay_architecture_0821_on_AWS.png[Red Hat Quay on AWS] - -If Red Hat Quay is running on AWS, you can use +If {productname} is running on Amazon Web Services (AWS), you can use the following features: * AWS Elastic Load Balancer -* AWS S3 (hot) blob storage -* AWS RDS database +* AWS S3 (hot) blob storage +* AWS RDS database * AWS ElastiCache Redis -* EC2 VMs recommendation: M3.Large or M4.XLarge +* EC2 virtual machine recommendation: M3.Large or M4.XLarge + +The following image provides a high-level overview of {productname} running on AWS: + +.{productname} on AWS +image:178_Quay_architecture_0821_on_AWS.png[{productname} on AWS] diff --git a/modules/public-cloud-azure.adoc b/modules/public-cloud-azure.adoc index 5380f1c7b..c59aa57fe 100644 --- a/modules/public-cloud-azure.adoc +++ b/modules/public-cloud-azure.adoc @@ -1,9 +1,15 @@ -= Running Red Hat Quay on Microsoft Azure +:_content-type: CONCEPT +[id="arch-quay-on-azure"] += Running {productname} on Microsoft Azure -image:178_Quay_architecture_0821_on_Azure.png[Red Hat Quay on Microsoft Azure] +If {productname} is running on Microsoft Azure, you can use the following features: -If Quay is running on Microsoft Azure, you can use: - -* Azure managed services such as HA PostgreSQL -* Azure Blob Storage must be hot storage (not Azure Cool Blob Storage) +* Azure managed services such as highly available PostgreSQL +* Azure Blob Storage, which must be hot storage +** Azure Cool Blob Storage is not supported for {productname} * Azure Cache for Redis + +The following image provides a high-level overview of {productname} running on Microsoft Azure: + +.{productname} 
on Microsoft Azure +image:178_Quay_architecture_0821_on_Azure.png[{productname} on Microsoft Azure] \ No newline at end of file diff --git a/modules/public-cloud-intro.adoc b/modules/public-cloud-intro.adoc index fb20bca19..129e6bbb5 100644 --- a/modules/public-cloud-intro.adoc +++ b/modules/public-cloud-intro.adoc @@ -1,9 +1,7 @@ -= Quay on public cloud - -Quay can run on public clouds, either in standalone mode or where OpenShift itself has been deployed on public cloud. - -Recommendation: If Quay is running on public cloud, then you should use the public cloud services for Quay backend services to ensure proper HA and scalability - -A full list of tested and supported configurations can be found in the Red Hat Quay Tested Integrations Matrix at link:https://access.redhat.com/articles/4067991[] +:_content-type: CONCEPT +[id="arch-deploy-quay-public-cloud"] += Deploying {productname} on public cloud +{productname} can run on public clouds, either in standalone mode or where {ocp} itself has been deployed on public cloud. A full list of tested and supported configurations can be found in the {productname} *Tested Integrations Matrix* at link:https://access.redhat.com/articles/4067991[]. +**Recommendation:** If {productname} is running on public cloud, then you should use the public cloud services for {productname} backend services to ensure proper high availability and scalability. \ No newline at end of file diff --git a/modules/qbo-operator-upgrade.adoc b/modules/qbo-operator-upgrade.adoc index 61dbd92d7..7da3e2ed1 100644 --- a/modules/qbo-operator-upgrade.adoc +++ b/modules/qbo-operator-upgrade.adoc @@ -3,7 +3,7 @@ To upgrade the Quay Bridge Operator (QBO), change the Channel Subscription update channel in the Subscription tab to the desired channel. -When upgrading QBO from version 3.5 to 3.6, a number of extra steps are required: +When upgrading QBO from version 3.5 to 3.7, a number of extra steps are required: . You need to create a new `QuayIntegration` custom resource. This can be completed in the Web Console or from the command line. + @@ -35,4 +35,4 @@ $ oc create -f upgrade-quay-integration.yaml [source,bash] ---- $ oc delete mutatingwebhookconfigurations.admissionregistration.k8s.io quay-bridge-operator ----- +---- diff --git a/modules/quay-as-cache-proxy.adoc b/modules/quay-as-cache-proxy.adoc index de9b98299..1b72dfb5f 100644 --- a/modules/quay-as-cache-proxy.adoc +++ b/modules/quay-as-cache-proxy.adoc @@ -5,18 +5,13 @@ With the growing popularity of container development, customers increasingly rel With this feature, {productname} will act as a proxy cache to circumvent pull-rate limitations from upstream registries. Adding a cache feature also accelerates pull performance, because images are pulled from the cache rather than upstream dependencies. Cached images are only updated when the upstream image digest differs from the cached image, reducing rate limitations and potential throttling. -With the {productname} cache proxy technology preview, the following features are available: +With {productname} cache proxy, the following features are available: * Specific organizations can be defined as a cache for upstream registries. * Configuration of a Quay organization that acts as a cache for a specific upstream registry. This repository can be defined by using the Quay UI, and offers the following configurations: ** Upstream registry credentials for private repositories or increased rate limiting. ** Expiration timer to avoid surpassing cache organization size. 
-+ -[NOTE] -==== -Because cache proxy is still marked as `Technology Preview`, there is no storage quota support yet. When this feature goes `General Availability` in a future release of {productname}, the expiration timer will be supplemented by another timer that protects against intermittent upstream registry issues. -==== * Global on/off configurable via the configuration application. -* Caching of entire upstream registries or just a single namespace, for example, all of `\docker.io` or just `\docker.io/library`. +* Caching of entire upstream registries or just a single namespace, for example, all of `docker.io` or just `docker.io/library`. * Logging of all cache pulls. * Cached images scannability by Clair. diff --git a/modules/quay-internal-registry-intro.adoc b/modules/quay-internal-registry-intro.adoc index 30abe4194..58918a682 100644 --- a/modules/quay-internal-registry-intro.adoc +++ b/modules/quay-internal-registry-intro.adoc @@ -1,6 +1,7 @@ -[[quay-internal-registry-intro]] +:_content-type: CONCEPT +[id="quay-internal-registry-intro"] = Using {productname} with or without internal registry -{productname} can be used as an external registry in front of multiple OpenShift clusters with their internal registries. +{productname} can be used as an external registry in front of multiple {ocp} clusters with their internal registries. -{productname} can also be used in place of the internal registry when it comes to automating builds and deployment rollouts. The required coordination of `Secrets` and `ImageStreams` is automated by the Quay Bridge Operator, which can be launched from the OperatorHub for OpenShift. +{productname} can also be used in place of the internal registry when it comes to automating builds and deployment rollouts. The required coordination of `Secrets` and `ImageStreams` is automated by the Quay Bridge Operator, which can be launched from the OperatorHub for {ocp}. diff --git a/modules/quay-robot-accounts-intro.adoc b/modules/quay-robot-accounts-intro.adoc index 7c0134772..e1fa68c2d 100644 --- a/modules/quay-robot-accounts-intro.adoc +++ b/modules/quay-robot-accounts-intro.adoc @@ -3,7 +3,7 @@ Robot accounts are named tokens that hold credentials for accessing external repositories. By assigning credentials to a robot, that robot can be used across multiple mirrored repositories that need to access the same external registry. -Robot accounts are managed inside of the *Robot Accounts* tab. They can only belong to one organization, but can be assigned to multiple Teams. +Robot accounts are managed on the *Robot Accounts* tab. They can only belong to one organization, but can be assigned to multiple Teams. [NOTE] ==== @@ -11,4 +11,4 @@ Teams and users can belong to multiple organizations. ==== //should probably be an xref -For more information on robot accounts, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index#working-with-mirrored-repo[Working with mirrored repositories]. +For more information on robot accounts, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/index#allow-robot-access-user-repo[Allowing robot access to a user repository]. 
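+For example, after a robot account has been created and granted access, its token can be used anywhere a registry login is required, such as when configuring repository mirroring. The following is a minimal sketch only; the organization name, robot name, token, and registry hostname are placeholders for your own values: + +[source,terminal] +---- +$ podman login -u='orga+robot' -p='<robot_token>' quay.example.com +---- 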
diff --git a/modules/quay-sso-keycloak-intro.adoc b/modules/quay-sso-keycloak-intro.adoc index 7dd8b974a..9beeac7e1 100644 --- a/modules/quay-sso-keycloak-intro.adoc +++ b/modules/quay-sso-keycloak-intro.adoc @@ -1,6 +1,6 @@ [[quay-sso-keycloak]] = {productname} and Red Hat SSO / Keycloak -Quay Enterprise can support authentication via OpenID Connect (OIDC). Red Hat Single Sign On (SSO) is an OIDC provider that allows administrators to have a seamless authentication integration between Quay Enterprise and other application platforms such as Red Hat OpenShift Container Platform. +{productname} can support authentication via OpenID Connect (OIDC). Red Hat Single Sign On (SSO) is an OIDC provider that allows administrators to have a seamless authentication integration between {productname} and other application platforms such as Red Hat OpenShift Container Platform. -{productname} and Red Hat SSO / Keycloak requires that TLS/SSL is properly configured to proceed with setup. Red Hat SSO supports many different types of OIDC. Quay Enterprise, however, only supports OIDC clients configured for link:https://access.redhat.com/solutions/3496181[Confidential Client Credentials]. For more information configuring Red Hat SSO, see link:https://access.redhat.com/solutions/3566061[Quay Enterprise with Red Hat Single Sign On / Keycloak]. +{productname} and Red Hat SSO / Keycloak require that TLS/SSL be properly configured before setup can proceed. Red Hat SSO supports many different types of OIDC. {productname}, however, only supports OIDC clients configured for link:https://access.redhat.com/solutions/3496181[Confidential Client Credentials]. For more information about configuring Red Hat SSO, see link:https://access.redhat.com/solutions/3566061[Red Hat Quay with Red Hat Single Sign On / Keycloak]. diff --git a/modules/quay-super-users-intro.adoc b/modules/quay-super-users-intro.adoc index 5683a4055..54bdbbe98 100644 --- a/modules/quay-super-users-intro.adoc +++ b/modules/quay-super-users-intro.adoc @@ -1,10 +1,10 @@ [[quay-super-users]] -= {productname} Super users += {productname} super users `Super users` are a group of {productname} users with enhanced access and privileges, including: * Super user API calls that are not visible or accessible to normal users. -* Access to the `Super User Admin Panel`, which allows users to +* Access to the `Super User Admin Panel`, which allows users to: ** Change a user's email address, password, delete, or disable users. ** Rename, delete, or take ownership of an organization. ** Change expiration time, rename, or delete service keys such as Clair. diff --git a/modules/quay-users-intro.adoc b/modules/quay-users-intro.adoc index d7fa96b39..63f27ea04 100644 --- a/modules/quay-users-intro.adoc +++ b/modules/quay-users-intro.adoc @@ -17,4 +17,4 @@ Each user automatically gets their own user namespace, for example, quay.example User namespaces are different from Quay organizations. There are no teams, usage logs, default permissions, or OAuth applications. By comparison, organizations are listed under quay.example.com/organization. ==== -Users who are not members of an organization can be added to the repository and given permissions. External users are marked with a special character inside of the *Users and Robot Permissions* section under the *Repository Settings* tab. They are also shown under the *Collaborators View* tab on on the organization level of the *Teams and Membership* page. 
+Users who are not members of an organization can be added to the repository and given permissions. External users are marked with a special character in the *Users and Robot Permissions* section under the *Repository Settings* tab. They are also shown under the *Collaborators View* tab on the organization level of the *Teams and Membership* page. diff --git a/modules/quota-establishment-api.adoc b/modules/quota-establishment-api.adoc index fd1333594..5b835148f 100644 --- a/modules/quota-establishment-api.adoc +++ b/modules/quota-establishment-api.adoc @@ -1,17 +1,27 @@ [[quota-establishment-api]] -= Establishing quota with the {productname} API += Establishing quota with the {productname} API -When an organization is first created, it does not have a quota applied. +When an organization is first created, it does not have a quota applied. Use the */api/v1/organization/{organization}/quota* endpoint: -.No initial quota -image:quota-no-quota.png[No quota] +.Sample command +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq +---- + +.Sample output +[source,terminal] +---- +[] +---- == Setting the quota +To set a quota for an organization, POST data to the */api/v1/organization/{orgname}/quota* endpoint: .Sample command [source,terminal] ---- -$ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"limit_bytes": 10485760}' https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/namespacequota/testorg/quota | jq +$ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"limit_bytes": 10485760}' https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/organization/testorg/quota | jq ---- .Sample output @@ -22,10 +32,12 @@ $ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: applicati == Viewing the quota +To see the applied quota, `GET` data from the */api/v1/organization/{orgname}/quota* endpoint: + .Sample command [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq ---- .Sample output @@ -42,18 +54,14 @@ $ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLw ] ---- - -.Organization quota of 10MB -image:quota-10MB-empty.png[10MB quota] - == Modifying the quota -Use the PUT command to change the existing quota, in this instance, from 10MB to 100MB: +To change the existing quota, in this instance from 10 MB to 100 MB, PUT data to the */api/v1/organization/{orgname}/quota/{quota_id}* endpoint: .Sample command [source,terminal] ---- -$ curl -k -X PUT -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' -d '{"limit_bytes": 104857600}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1 | jq +$ curl -k -X PUT -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"limit_bytes": 104857600}' 
https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1 | jq ---- .Sample output @@ -68,18 +76,14 @@ $ curl -k -X PUT -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLw } ---- -.Organization quota of 100MB -image:quota-100MB-empty.png[100MB quota] - -The quota settings are also visible in the organization settings UI: - -.Organization settings UI -image:quota-100MB-settings-ui.png[Organization settings UI] - == Pushing images +To see the storage consumed, push various images to the organization. + === Pushing ubuntu:18.04 +Push ubuntu:18.04 to the organization from the command line: + .Sample commands [source,terminal] ---- @@ -90,20 +94,15 @@ $ podman tag docker.io/library/ubuntu:18.04 example-registry-quay-quay-enterpris $ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 ---- -The UI shows the total proportion of the quota used up by this first Ubuntu image: -.Total Quota Consumed for first image -image:quota-first-image.png[Total Quota Consumed for first image] + +=== Using the API to view quota usage - - - -=== Using the API to see quota usage +To view the storage consumed, `GET` data from the */api/v1/repository* endpoint: .Sample command [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true&quota=true' | jq +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true&quota=true' | jq ---- .Sample output @@ -132,7 +131,7 @@ $ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLw === Pushing another image -. Pull, tag and push the `nginx` image: +. Pull, tag, and push a second image, for example, `nginx`: + .Sample commands [source,terminal] ---- @@ -144,12 +143,12 @@ $ podman tag docker.io/library/nginx example-registry-quay-quay-enterprise.apps. $ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx ---- -. View the quota report +. To view the quota report for the repositories in the organization, use the */api/v1/repository* endpoint: + .Sample command [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true&quota=true' +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true&quota=true' ---- + .Sample output @@ -190,12 +189,13 @@ $ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLw ] } ---- -. View the organization details + +. 
To view the quota information in the organization details, use the */api/v1/organization/{orgname}* endpoint: + .Sample command [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg' | jq +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg' | jq ---- + .Sample output @@ -218,33 +218,37 @@ $ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLw } ---- - - -.Total Quota Consumed for two images -image:quota-second-image.png[Total Quota Consumed for two images] - == Rejecting pushes using quota limits +If an image push exceeds defined quota limitations, a soft or hard check occurs: + +* For a soft check, or _warning_, users are notified. +* For a hard check, or _reject_, the push is terminated. === Setting reject and warning limits +To set _reject_ and _warning_ limits, POST data to the */api/v1/organization/{orgname}/quota/{quota_id}/limit* endpoint: + .Sample reject limit command [source,terminal] ---- -$ curl -k -X POST -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' -d '{"type":"Reject","threshold_percent":80}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1/limit +$ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"type":"Reject","threshold_percent":80}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1/limit ---- - .Sample warning limit command [source,terminal] ---- -$ curl -k -X POST -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' -d '{"type":"Warning","threshold_percent":50}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1/limit +$ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"type":"Warning","threshold_percent":50}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1/limit ---- +=== Viewing reject and warning limits + +To view the _reject_ and _warning_ limits, use the */api/v1/organization/{orgname}/quota* endpoint: + .View quota limits [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnLwwIL" -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq ---- @@ -273,14 +277,12 @@ $ curl -k -X GET -H "Authorization: Bearer NspeNNVPobaRjOBSb3WbfZdVtu7ZGSvKoHnL ] ---- -.Quota limits in Organization Settings -image:quota-limits.png[Quota limits in Organization Settings] - - -=== Pushing image when reject limit is exceeded +=== Pushing an image when the reject limit is exceeded In this example, the reject limit (80%) has been set below the current repository size (~83%), so the next push should automatically be rejected. For example, with a 100 MB quota, a reject threshold of 80% blocks pushes once more than 80 MB is consumed; at ~83 MB used, that limit has already been crossed. 
+Push a sample image to the organization from the command line: + .Sample image push [source,terminal] ---- @@ -291,7 +293,6 @@ $ podman tag docker.io/library/ubuntu:20.04 example-registry-quay-quay-enterpris $ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:20.04 ---- - .Sample output when quota exceeded [source,terminal] ---- @@ -299,17 +300,17 @@ Getting image source signatures Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB -WARN[0002] failed, retrying in 1s ... (1/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +WARN[0002] failed, retrying in 1s ... (1/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace Getting image source signatures Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB -WARN[0005] failed, retrying in 1s ... (2/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +WARN[0005] failed, retrying in 1s ... (2/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace Getting image source signatures Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB -WARN[0009] failed, retrying in 1s ... (3/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +WARN[0009] failed, retrying in 1s ... (3/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace Getting image source signatures Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB @@ -324,4 +325,3 @@ When limits are exceeded, a notification appears: .Quota notifications image:quota-notifications.png[Quota notifications] - diff --git a/modules/quota-establishment-ui.adoc b/modules/quota-establishment-ui.adoc index 264036603..cd47e2dad 100644 --- a/modules/quota-establishment-ui.adoc +++ b/modules/quota-establishment-ui.adoc @@ -5,29 +5,129 @@ The following procedure describes how you can report storage consumption and est .Prerequisites -* A valid {productname} repository. -* A superuser administrator. +* A {productname} registry. +* A superuser account. 
* Enough storage to meet the demands of quota limitations. .Procedure -. Log in to your organization's repository as a superuser. +. Create a new organization or choose an existing one. Initially, no quota is configured, as can be seen on the *Organization Settings* tab: ++ +image:quota-none-org-settings.png[No Quota Configured] + +. Log in to the registry as a superuser and navigate to the *Manage Organizations* tab on the *Super User Admin Panel*. Click the *Options* icon of the organization for which you want to create storage quota limits: ++ +image:quota-su-org-options.png[Organization options] + +. Click *Configure Quota* and enter the initial quota, for example, *10 MB*. Then click *Apply* and *Close*: ++ +image:quota-su-init-10MB.png[Initial quota] + +. Check that the quota consumed shows *0 of 10 MB* on the *Manage Organizations* tab of the superuser panel: ++ +image:quota-su-init-consumed.png[Initial consumed quota] ++ +The consumed quota information is also available directly on the Organization page: ++ +.Initial consumed quota +image:quota-org-init-consumed.png[Initial consumed quota] + +. To increase the quota to 100MB, navigate to the *Manage Organizations* tab on the superuser panel. Click the *Options* icon and select *Configure Quota*, setting the quota to 100 MB. Click *Apply* and then *Close*: ++ +image:quota-su-increase-100MB.png[Increase quota] + +. Push a sample image to the organization from the command line: ++ +.Sample commands +[source,terminal] +---- +$ podman pull ubuntu:18.04 + +$ podman tag docker.io/library/ubuntu:18.04 example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 + +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 +---- + +. On the superuser panel, the quota consumed per organization is displayed: ++ +image:quota-su-consumed-first.png[Total Quota Consumed for first image] + +. The Organization page shows the total proportion of the quota used by the image: ++ +.Total Quota Consumed for first image +image:quota-org-consumed-first.png[Total Quota Consumed for first image] + +. Pull, tag, and push a second image, for example, `nginx`: ++ +.Sample commands +[source,terminal] +---- +$ podman pull nginx + +$ podman tag docker.io/library/nginx example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx + +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx +---- + +. The Organization page shows the total proportion of the quota used by each repository in that organization: ++ +.Total Quota Consumed for each repository +image:quota-org-consumed-second.png[Total Quota Consumed for each repository] -. On the *Users and Organizations* panel, click the name of the repository you want to define storage quota limits for. +. Create _reject_ and _warning_ limits: ++ +From the superuser panel, navigate to the *Manage Organizations* tab. Click the *Options* icon for the organization and select *Configure Quota*. In the *Quota Policy* section, with the *Action* type set to *Reject*, set the *Quota Threshold* to *80* and click *Add Limit*: ++ +image:quota-su-reject-80.png[Reject limit] -. Click *Organization Settings* on the left hand pane. +. 
To create a _warning_ limit, select *Warning* as the *Action* type, set the *Quota Threshold* to *70* and click *Add Limit*: + -[NOTE] -==== -Alternatively, you can establish quota management by clicking *Manage Organizations* on the left hand pane, and then clicking *Settings* -> *Configure Quota*. -==== +image:quota-su-warning-70.png[Warning limit] -. Set the organization quota to the desired amount in the *Quota Management* section, for example, 10 GB. +. Click *Close* on the quota popup. The limits are viewable, but not editable, on the *Settings* tab of the *Organization* page: ++ +image:quota-org-quota-policy.png[Quota policy in organization settings] -. Set the percentage at which users will be warned when the organization reaches their defined threshold. For example, setting *Warning* to 60 is equivalent to 60%. +. Push an image where the reject limit is exceeded: ++ +Because the reject limit (80%) has been set to below the current repository size (~83%), the next push is rejected automatically. ++ +.Sample image push +[source,terminal] +---- +$ podman pull ubuntu:20.04 -. Set the percentage at which pushes are rejected from the organization. For example, setting *Rejected* to 99 is equivalent to 99%. +$ podman tag docker.io/library/ubuntu:20.04 example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:20.04 -. Click *Save Quota Details*. +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:20.04 +---- ++ +.Sample output when quota exceeded +[source,terminal] +---- +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0002] failed, retrying in 1s ... (1/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0005] failed, retrying in 1s ... (2/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0009] failed, retrying in 1s ... (3/3). 
Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace
+Getting image source signatures
+Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB
+Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB
+Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB
+Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace
+----
-You can check the remaining storage on the *Repositories* page of your organization. You can also alter or remove quota limitations, warnings, and rejections by returning to the *Organization Settings* page.
+. When limits are exceeded, notifications are displayed in the UI:
++
+.Quota notifications
+image:quota-notifications.png[Quota notifications]
\ No newline at end of file
diff --git a/modules/quota-management-limitations.adoc
index 7e850aebd..00c25002d 100644
--- a/modules/quota-management-limitations.adoc
+++ b/modules/quota-management-limitations.adoc
@@ -6,10 +6,10 @@ Quota management helps organizations to maintain resource consumption. One limit
 The maximum storage quota size is dependent on the selected database:
-.Worker count environment variables
-[cols="2a,2a, 2a",options="header"]
+.Maximum storage quota size per database
+[cols="2a,2a",options="header"]
|===
-| Variable | Description
-| Postgres | 8388608 TB
-| MySQL | 8388608 TB
-| SQL Server | 16777216 TB
+|Database |Maximum quota size
+|Postgres |8388608 TB
+|MySQL |8388608 TB
+|SQL Server |16777216 TB
|===
diff --git a/modules/ref_deploy_quay_openshift.adoc
index f821c0f25..a4754be08 100644
--- a/modules/ref_deploy_quay_openshift.adoc
+++ b/modules/ref_deploy_quay_openshift.adoc
@@ -41,7 +41,7 @@ data:
 .dockerconfigjson: <1>
 type: kubernetes.io/dockerconfigjson
----
-<1> Change to include the credentials shown from link:https://access.redhat.com/solutions/3533201[Accessing Red Hat Quay]
+<1> Change to include the credentials shown from link:https://access.redhat.com/solutions/3533201[Accessing {productname}]
 == {productname} storage
diff --git a/modules/ref_quay-integration-config-fields.adoc
new file mode 100644
index 000000000..055ef44be
--- /dev/null
+++ b/modules/ref_quay-integration-config-fields.adoc
@@ -0,0 +1,24 @@
+:_content-type: REFERENCE
+[id="quay-integration-config-fields"]
+= QuayIntegration configuration fields
+
+The following configuration fields are available for the QuayIntegration custom resource:
+
+[cols="4a,2a,2a",options="header"]
+|===
+|Name |Description |Schema
+|allowlistNamespaces +
+(Optional) |A list of namespaces to include. |Array
+|clusterID +
+(Required) |The ID associated with this cluster. |String
+|credentialsSecret.key +
+(Required) |The secret containing credentials to communicate with the Quay registry. |Object
+|denylistNamespaces +
+(Optional) |A list of namespaces to exclude. |Array
+|insecureRegistry +
+(Optional) |Whether to skip TLS verification to the Quay registry. |Boolean
+|quayHostname +
+(Required) |The hostname of the Quay registry. |String
+|scheduledImageStreamImport +
+(Optional) |Whether to enable image stream importing. 
|Boolean +|=== diff --git a/modules/repo-organizations-and-users-intro.adoc b/modules/repo-organizations-and-users-intro.adoc index 2a9008487..f4cbdfbc6 100644 --- a/modules/repo-organizations-and-users-intro.adoc +++ b/modules/repo-organizations-and-users-intro.adoc @@ -18,8 +18,8 @@ The following is a brief overview of the organization's main page: |Inherits all permissions sets for the team |*Creator* -|All member positions, plus the ability to create new repositories +|All member permissions, plus the ability to create new repositories |*Admin* -|Full administrative access to the organization, including the ability to create news, add members, and set permissions. +|Full administrative access to the organization, including the ability to create new repositories, add members, and set permissions. |=== diff --git a/modules/resetting-superuser-password-on-operator.adoc b/modules/resetting-superuser-password-on-operator.adoc new file mode 100644 index 000000000..290c165aa --- /dev/null +++ b/modules/resetting-superuser-password-on-operator.adoc @@ -0,0 +1,82 @@ +:_content-type: CONCEPT +[id="resetting-superuser-password-on-operator"] += Resetting superuser passwords on the {productname} Operator + +.Prerequisites + +* You have created a {productname} superuser. +* You have installed Python 3.9. +* You have installed the `pip` package manager for Python. +* You have installed the `bcrypt` package for `pip`. + +.Procedure + +. Log in to your {productname} deployment. + +. On the {ocp} UI, navigate to *Workloads* -> *Secrets*. + +. Select the namespace for your {productname} deployment, for example, `Project quay`. + +. Locate and store the PostgreSQL database credentials. + +. Generate a secure, hashed password using the `bcrypt` package in Python 3.9 by entering the following command: ++ +[source,terminal] +---- +$ python3.9 -c 'import bcrypt; print(bcrypt.hashpw(b"newpass1234", bcrypt.gensalt(12)).decode("utf-8"))' +---- ++ +.Example output ++ +[source,terminal] +---- +$2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y +---- + +. On the CLI, log in to the database, for example: ++ +[source,terminal] +---- +$ oc rsh quayuser-quay-quay-database-669c8998f-v9qsl +---- + +. Enter the following command to open a connection to the `quay` PostgreSQL database server, specifying the database, username, and host address: ++ +[source,terminal] +---- +sh-4.4$ psql -U quayuser-quay-quay-database -d quayuser-quay-quay-database -W +---- + +. Enter the following command to connect to the default database for the current user: ++ +[source,terminal] +---- +quay=> \c +---- + +. Update the `password_hash` of the superuser admin who lost their password: ++ +[source,terminal] +---- +quay=> UPDATE public.user SET password_hash = '$2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y' where username = 'quayadmin'; +---- + +. 
Enter the following command to ensure that the `password_hash` has been updated:
++
+[source,terminal]
+----
+quay=> select * from public.user;
+----
++
+.Example output
++
+[source,terminal]
+----
+id | uuid | username | password_hash | email | verified | stripe_id | organization | robot | invoice_email | invalid_login_attempts | last_invalid_login |removed_tag_expiration_s | enabled | invoice_email_address | company | family_name | given_name | location | maximum_queued_builds_count | creation_date | last_accessed
+----+--------------------------------------+-----------+--------------------------------------------------------------+-----------------------+---
+-------+-----------+--------------+-------+---------------+------------------------+----------------------------+--------------------------+------
+---+-----------------------+---------+-------------+------------+----------+-----------------------------+----------------------------+-----------
+1 | 73f04ef6-19ba-41d3-b14d-f2f1eed94a4a | quayadmin | $2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y | quayadmin@example.com | t | | f | f | f | 0 | 2023-02-23 07:54:39.116485 | 1209600 | t | | | | | | | 2023-02-23 07:54:39.116492
+----
+
+. Navigate to your {productname} UI on {ocp} and log in using the new credentials.
\ No newline at end of file
diff --git a/modules/restoring-red-hat-quay-standalone.adoc
new file mode 100644
index 000000000..9d53acda0
--- /dev/null
+++ b/modules/restoring-red-hat-quay-standalone.adoc
@@ -0,0 +1,238 @@
+:_content-type: PROCEDURE
+[[restoring-red-hat-quay-standalone]]
+= Restoring {productname} on standalone deployments
+
+This procedure describes how to restore {productname} on standalone deployments.
+
+.Prerequisites
+
+* You have backed up your {productname} deployment.
+
+.Procedure
+
+. Create a new directory that will bind-mount to `/conf/stack` inside of the {productname} container:
++
+[source,terminal]
+----
+$ mkdir /opt/new-quay-install
+----
+
+. Copy the contents of your temporary backup directory created in xref:backing-up-red-hat-quay-standalone[Backing up {productname} on standalone deployments] to the `new-quay-install` directory created in Step 1:
++
+[source,terminal]
+----
+$ cp /tmp/quay-backup/quay-backup.tar.gz /opt/new-quay-install/
+----
+
+. Change into the `new-quay-install` directory by entering the following command:
++
+[source,terminal]
+----
+$ cd /opt/new-quay-install/
+----
+
+. Extract the contents of your {productname} directory:
++
+[source,terminal]
+----
+$ tar xvf /tmp/quay-backup/quay-backup.tar.gz *
+----
++
+Example output:
++
+----
+config.yaml
+config.yaml.bak
+extra_ca_certs/
+extra_ca_certs/ca.crt
+ssl.cert
+ssl.key
+----
+
+. Recall the `DB_URI` from your backed-up `config.yaml` file by entering the following command:
++
+[source,terminal]
+----
+$ grep DB_URI config.yaml
+----
++
+Example output:
++
+[source,yaml]
+----
+postgresql://:test123@172.24.10.50/quay
+----
+
+. Run the following command to enter the PostgreSQL database server:
++
+[source,terminal]
+----
+$ sudo postgres
+----
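++
+[NOTE]
+====
+If the `sudo postgres` invocation is not available in your environment, switching to the `postgres` system user accomplishes the same thing. A minimal sketch, assuming standard PostgreSQL packaging and `sudo` access:
+
+[source,terminal]
+----
+$ sudo -u postgres -i
+----
+====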
+
+. Enter `psql` and create a new database on 172.24.10.50 to restore the Quay databases, for example, `example_restore_registry_quay_database`, by entering the following command:
++
+[source,terminal]
+----
+$ psql "host=172.24.10.50 port=5432 dbname=postgres user= password=test123"
+postgres=> CREATE DATABASE example_restore_registry_quay_database;
+----
++
+Example output:
++
+----
+CREATE DATABASE
+----
+
+. Connect to the database by running the following command:
++
+[source,terminal]
+----
+postgres=# \c "example-restore-registry-quay-database";
+----
++
+Example output:
++
+[source,terminal]
+----
+You are now connected to database "example-restore-registry-quay-database" as user "postgres".
+----
+
+. Create a `pg_trgm` extension of your Quay database by running the following command:
++
+[source,terminal]
+----
+example_restore_registry_quay_database=> CREATE EXTENSION IF NOT EXISTS pg_trgm;
+----
++
+Example output:
++
+[source,terminal]
+----
+CREATE EXTENSION
+----
+
+. Exit the postgres CLI by entering the following command:
++
+[source,terminal]
+----
+\q
+----
+
+. Import the database backup to your new database by running the following command:
++
+[source,terminal]
+----
+$ psql "host=172.24.10.50 port=5432 dbname=example_restore_registry_quay_database user= password=test123" -W < /tmp/quay-backup/quay-backup.sql
+----
++
+Example output:
++
+----
+SET
+SET
+SET
+SET
+SET
+----
++
+Update the value of `DB_URI` in your `config.yaml` from `postgresql://:test123@172.24.10.50/quay` to `postgresql://:test123@172.24.10.50/example-restore-registry-quay-database` before restarting the {productname} deployment.
++
+[NOTE]
+====
+The `DB_URI` format is `postgresql://<username>:<password>@<database_host>/<database_name>`. If you are moving from one PostgreSQL server to another PostgreSQL server, update the values of `<username>`, `<password>`, and `<database_host>` at the same time.
+====
+
+. In the `/opt/new-quay-install` directory, print the contents of your `DISTRIBUTED_STORAGE_CONFIG` bundle:
++
+[source,terminal]
+----
+$ cat config.yaml | grep DISTRIBUTED_STORAGE_CONFIG -A10
+----
++
+Example output:
++
+[source,yaml]
+----
+DISTRIBUTED_STORAGE_CONFIG:
+  default:
+  - S3Storage
+  - s3_bucket:
+    storage_path: /registry
+    s3_access_key:
+    s3_secret_key:
+    host:
+----
++
+[NOTE]
+====
+Your `DISTRIBUTED_STORAGE_CONFIG` in `/opt/new-quay-install` must be updated before restarting your {productname} deployment.
+====
+
+. Export the `AWS_ACCESS_KEY_ID` by using the `access_key` credential obtained in Step 13:
++
+[source,terminal]
+----
+$ export AWS_ACCESS_KEY_ID=
+----
+
+. Export the `AWS_SECRET_ACCESS_KEY` by using the `secret_key` obtained in Step 13:
++
+[source,terminal]
+----
+$ export AWS_SECRET_ACCESS_KEY=
+----
+
+. Create a new s3 bucket by entering the following command:
++
+[source,terminal]
+----
+$ aws s3 mb s3:// --region us-east-2
+----
++
+Example output:
++
+[source,terminal]
+----
+make_bucket: quay
+----
+
+. Upload all blobs to the new s3 bucket by entering the following command:
++
+[source,terminal]
+----
+$ aws s3 sync --no-verify-ssl \
+--endpoint-url <1>
+/tmp/quay-backup/blob-backup/. s3://quay/
+----
+<1> The {productname} registry endpoint must be the same before backup and after restore. 
++
+Example output:
++
+[source,terminal]
+----
+upload: ../../tmp/quay-backup/blob-backup/datastorage/registry/sha256/50/505edb46ea5d32b5cbe275eb766d960842a52ee77ac225e4dc8abb12f409a30d to s3://quay/datastorage/registry/sha256/50/505edb46ea5d32b5cbe275eb766d960842a52ee77ac225e4dc8abb12f409a30d
+upload: ../../tmp/quay-backup/blob-backup/datastorage/registry/sha256/27/27930dc06c2ee27ac6f543ba0e93640dd21eea458eac47355e8e5989dea087d0 to s3://quay/datastorage/registry/sha256/27/27930dc06c2ee27ac6f543ba0e93640dd21eea458eac47355e8e5989dea087d0
+upload: ../../tmp/quay-backup/blob-backup/datastorage/registry/sha256/8c/8c7daf5e20eee45ffe4b36761c4bb6729fb3ee60d4f588f712989939323110ec to s3://quay/datastorage/registry/sha256/8c/8c7daf5e20eee45ffe4b36761c4bb6729fb3ee60d4f588f712989939323110ec
+...
+----
+
+. Before restarting your {productname} deployment, update the storage settings in your config.yaml:
++
+[source,yaml]
+----
+DISTRIBUTED_STORAGE_CONFIG:
+  default:
+  - S3Storage
+  - s3_bucket:
+    storage_path: /registry
+    s3_access_key:
+    s3_secret_key:
+    host:
+----
\ No newline at end of file
diff --git a/modules/restoring-red-hat-quay.adoc
new file mode 100644
index 000000000..a78db2e3d
--- /dev/null
+++ b/modules/restoring-red-hat-quay.adoc
@@ -0,0 +1,288 @@
+[[restoring-up-red-hat-quay]]
+= Restoring {productname}
+
+This procedure is used to restore {productname} when the {productname} Operator manages the database. It should be performed after a backup of your {productname} registry has been performed. See xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] for more information.
+
+.Prerequisites
+
+* {productname} is deployed on OpenShift Container Platform using the {productname} Operator.
+* A backup of the {productname} configuration managed by the {productname} Operator has been created following the instructions in the xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] section.
+* Your {productname} database has been backed up.
+* The object storage bucket used by {productname} has been backed up.
+* The components `quay`, `postgres`, and `objectstorage` are set to `managed: true`.
+* If the component `clair` is set to `managed: true`, the component `clairpostgres` is also set to `managed: true` (starting with {productname} Operator v3.7 or later).
+* There is no running {productname} deployment managed by the {productname} Operator in the target namespace on your OpenShift Container Platform cluster.
+
+[NOTE]
+====
+If your deployment contains partially unmanaged database or storage components and you are using external services for Postgres or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to restore their data from a backup prior to restoring {productname}.
+====
+
+== Restoring {productname} and its configuration from a backup
+
+[NOTE]
+====
+These instructions assume you have followed the process in the xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] guide and created the backup files with the same names.
+====
+
+. 
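Optionally, confirm that the backup files are present in your working directory before you begin. A quick check, assuming the file names used in the backup guide:
++
+[source,terminal]
+----
+$ ls ./config-bundle.yaml ./managed-secret-keys.yaml ./quay-registry.yaml
+----
+
+. 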
Restore the backed up {productname} configuration and the generated keys from the backup: ++ +[source,terminal] +---- +$ oc create -f ./config-bundle.yaml + +$ oc create -f ./managed-secret-keys.yaml +---- ++ +[IMPORTANT] +==== +If you receive the error `Error from server (AlreadyExists): error when creating "./config-bundle.yaml": secrets "config-bundle-secret" already exists`, you must delete your existing resource with `$ oc delete Secret config-bundle-secret -n ` and recreate it with `$ oc create -f ./config-bundle.yaml`. +==== + +. Restore the `QuayRegistry` custom resource: ++ +[source,terminal] +---- +$ oc create -f ./quay-registry.yaml +---- + +. Check the status of the {productname} deployment and wait for it to be available: ++ +[source,terminal] +---- +$ oc wait quayregistry registry --for=condition=Available=true -n +---- + +== Scale down your {productname} deployment + +. *For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for Quay, mirror workers and Clair (if managed). Your `QuayRegistry` resource should look similar to the following: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: false <1> + - kind: quay + managed: true + overrides: <2> + replicas: 0 + - kind: clair + managed: true + overrides: + replicas: 0 + - kind: mirror + managed: true + overrides: + replicas: 0 + … +---- +<1> Disable auto scaling of Quay, Clair and Mirroring workers +<2> Set the replica count to 0 for components accessing the database and objectstorage + +. *For Operator version 3.6 and earlier:* Scale down the {productname} deployment by scaling down the {productname} Operator first and then the managed {productname} resources: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/^quay-operator/ {print $1}') -n + +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-app/ {print $1}') -n +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-mirror/ {print $1}') -n +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/clair-app/ {print $1}') -n +---- + +. Wait for the `registry-quay-app`, `registry-quay-mirror` and `registry-clair-app` pods (depending on which components you set to be managed by Operator) to disappear. You can check their status by running the following command: ++ +[source,terminal] +---- +$ oc get pods -n +---- ++ +Example output: ++ +[source,terminal] +---- +registry-quay-config-editor-77847fc4f5-nsbbv 1/1 Running 0 9m1s +registry-quay-database-66969cd859-n2ssm 1/1 Running 0 6d1h +registry-quay-redis-7cc5f6c977-956g8 1/1 Running 0 5d21h +---- + +== Restore your {productname} database + +. Identify your Quay database pod: ++ +[source,terminal] +---- +$ oc get pod -l quay-component=postgres -n -o jsonpath='{.items[0].metadata.name}' +---- ++ +Example output: ++ +---- +quayregistry-quay-database-59f54bb7-58xs7 +---- + +. Upload the backup by copying it from the local environment and into the pod: ++ +---- +$ oc cp ./backup.sql -n registry-quay-database-66969cd859-n2ssm:/tmp/backup.sql +---- + +. Open a remote terminal to the database: ++ +[source,terminal] +---- +$ oc rsh -n registry-quay-database-66969cd859-n2ssm +---- + +. Enter psql: ++ +[source,terminal] +---- +bash-4.4$ psql +---- + +. 
You can list the databases by running the following command:
++
+----
+postgres=# \l
+----
++
+Example output:
++
+[source,terminal]
+----
+ List of databases
+ Name | Owner | Encoding | Collate | Ctype | Access privileges
+----------------------------+----------------------------+----------+------------+------------+-----------------------
+postgres | postgres | UTF8 | en_US.utf8 | en_US.utf8 |
+quayregistry-quay-database | quayregistry-quay-database | UTF8 | en_US.utf8 | en_US.utf8 |
+----
+
+. Drop the database:
++
+----
+postgres=# DROP DATABASE "quayregistry-quay-database";
+----
++
+Example output:
++
+----
+DROP DATABASE
+----
+
+. Exit the postgres CLI to re-enter bash-4.4:
++
+----
+\q
+----
+
+. Redirect your PostgreSQL database to your backup database:
++
+[source,terminal]
+----
+sh-4.4$ psql < /tmp/backup.sql
+----
+
+. Exit bash:
++
+----
+sh-4.4$ exit
+----
+
+== Restore your {productname} object storage data
+
+. Export the `AWS_ACCESS_KEY_ID`:
++
+[source,terminal]
+----
+$ export AWS_ACCESS_KEY_ID=$(oc get secret -l app=noobaa -n  -o jsonpath='{.items[0].data.AWS_ACCESS_KEY_ID}' |base64 -d)
+----
+
+. Export the `AWS_SECRET_ACCESS_KEY`:
++
+[source,terminal]
+----
+$ export AWS_SECRET_ACCESS_KEY=$(oc get secret -l app=noobaa -n  -o jsonpath='{.items[0].data.AWS_SECRET_ACCESS_KEY}' |base64 -d)
+----
+
+. Upload all blobs to the bucket by running the following command:
++
+[source,terminal]
+----
+$ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift-storage -o jsonpath='{.spec.host}') ./blobs s3://$(oc get cm -l app=noobaa -n  -o jsonpath='{.items[0].data.BUCKET_NAME}')
+----
+
+[NOTE]
+====
+You can also use link:https://rclone.org/[rclone] or link:https://s3tools.org/s3cmd[s3cmd] instead of the AWS command line utility.
+====
+
+== Scale up your {productname} deployment
+
+. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers and Clair as applicable. Your `QuayRegistry` resource should look similar to the following:
++
+[source,yaml]
+----
+apiVersion: quay.redhat.com/v1
+kind: QuayRegistry
+metadata:
+  name: registry
+  namespace: ns
+spec:
+  components:
+    …
+    - kind: horizontalpodautoscaler
+      managed: true <1>
+    - kind: quay <2>
+      managed: true
+    - kind: clair
+      managed: true
+    - kind: mirror
+      managed: true
+    …
+----
+<1> Re-enables auto scaling of {productname}, Clair and mirroring workers again (if desired)
+<2> Replica overrides are removed again to scale the {productname} components back up
+
+. *For Operator version 3.6 and earlier:* Scale up the {productname} deployment by scaling up the {productname} Operator again:
++
+[source,terminal]
+----
+$ oc scale --replicas=1 deployment $(oc get deployment -n  | awk '/^quay-operator/ {print $1}') -n 
+----
+
+. Check the status of the {productname} deployment:
++
+[source,terminal]
+----
+$ oc wait quayregistry registry --for=condition=Available=true -n 
+----
++
+Example output:
++
+[source,yaml]
+----
+apiVersion: quay.redhat.com/v1
+kind: QuayRegistry
+metadata:
+  ...
+  name: registry
+  namespace:
+  ...
+spec:
+  ...
+status:
+  - lastTransitionTime: '2022-06-20T05:31:17Z'
+    lastUpdateTime: '2022-06-20T17:31:13Z'
+    message: All components reporting as healthy
+    reason: HealthChecksPassing
+    status: 'True'
+    type: Available
+----
diff --git a/modules/rn_3_10.adoc
index d01f398ad..e453aad3d 100644
--- a/modules/rn_3_10.adoc
+++ b/modules/rn_3_10.adoc
@@ -69,7 +69,7 @@ Known Issues:
 * During repository mirroring, in order to fetch tags from a repository, at least one tag in the list of tags to sync must exist exactly as specified. See
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#repo-mirroring-in-red-hat-quay[Repository Mirroring in Red Hat Quay] for more details.
+link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#repo-mirroring-in-red-hat-quay[Repository Mirroring in {productname}] for more details.
 * Repository mirror config has known issues when remote registry username or password has characters requiring special handling for shell commands. Specifically, the tokens for registry.redhat.io with a pipe (|) character in them are incorrectly escaped. Out of an abundance of caution, a fix for this will follow in a subsequent update.
 link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-100[Link to this Release]
diff --git a/modules/rn_3_30.adoc
index ae8c0d510..12b329c77 100644
--- a/modules/rn_3_30.adoc
+++ b/modules/rn_3_30.adoc
@@ -82,8 +82,8 @@ Fixed:
 Deprecated:
-* "rkt" conversion: This feature is now marked as deprecated in the Red Hat Quay UI. Expect the feature to be removed completely in the near future.
-* Bittorrent: This feature is deprecated and will not appear in the Red Hat Quay UI unless it is already configured in an existing Red Hat Quay config.yaml. This feature will be removed in the next version of Quay.
+* "rkt" conversion: This feature is now marked as deprecated in the {productname} UI. Expect the feature to be removed completely in the near future.
+* Bittorrent: This feature is deprecated and will not appear in the {productname} UI unless it is already configured in an existing {productname} config.yaml. This feature will be removed in the next version of Quay.
 * V1 Push Support: Docker V1 protocol support has been officially deprecated. Expect this feature to be removed in the near future.
 * Squashed image support: This feature is deprecated. This feature will be removed in the next version of Quay.
 * images API: This API is deprecated and replaced by the manifest APIs. Expect this API to be removed completely in the near future.
diff --git a/modules/rn_3_40.adoc
index 0750ca2cc..b5e16b85a 100644
--- a/modules/rn_3_40.adoc
+++ b/modules/rn_3_40.adoc
@@ -3,9 +3,9 @@
 === quay / clair / quay-builder
-Fixed: 
+Fixed:
-* link:https://issues.redhat.com/browse/PROJQUAY-2479[PROJQUAY-2479]. Update downstream Operator extensions API to "v1" for 3.4. 
+* link:https://issues.redhat.com/browse/PROJQUAY-2479[PROJQUAY-2479]. Update downstream Operator extensions API to "v1" for 3.4.
 === quay-operator
@@ -137,7 +137,7 @@ Added/Changed:
   FEATURE_GENERAL_OCI_SUPPORT: True
   FEATURE_HELM_OCI_SUPPORT: True
 ```
-* (Tech Preview) Due to necessary changes, the existing Red Hat Quay builders had to be removed and entirely rewritten. 
This has resulted in a loss of functionality so the new builders are being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. Currently, builds are only available on OpenShift/Kubernetes utilizing Red Hat CoreOS for the sandbox VMs. The internal build manager has also been completely re-written to use gRPC and numerous core issues have been addressed. Please follow the provided documentation carefully when setting up. +* (Tech Preview) Due to necessary changes, the existing {productname} builders had to be removed and entirely rewritten. This has resulted in a loss of functionality so the new builders are being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. Currently, builds are only available on OpenShift/Kubernetes utilizing Red Hat CoreOS for the sandbox VMs. The internal build manager has also been completely re-written to use gRPC and numerous core issues have been addressed. Please follow the provided documentation carefully when setting up. * NooBaa has graduated from Technical Preview (TP) and now has General Availability (GA) status. Fixed: diff --git a/modules/rn_3_50.adoc b/modules/rn_3_50.adoc index b874f38bf..5c800fdf2 100644 --- a/modules/rn_3_50.adoc +++ b/modules/rn_3_50.adoc @@ -108,7 +108,7 @@ Some features of Quay are not currently available when running on a FIPS-enabled Tech Preview -* Due to necessary changes, the existing Red Hat Quay builders had to be removed and entirely rewritten. This has resulted in a loss of functionality so the new builders are being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. Currently, builds are only available on OpenShift/Kubernetes utilizing Red Hat CoreOS for the sandbox VMs. The internal build manager has also been completely re-written to use gRPC and numerous core issues have been addressed. Please follow the provided documentation carefully when setting up. +* Due to necessary changes, the existing {productname} builders had to be removed and entirely rewritten. This has resulted in a loss of functionality so the new builders are being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. Currently, builds are only available on OpenShift/Kubernetes utilizing Red Hat CoreOS for the sandbox VMs. The internal build manager has also been completely re-written to use gRPC and numerous core issues have been addressed. Please follow the provided documentation carefully when setting up. Deprecated: diff --git a/modules/rn_3_60.adoc b/modules/rn_3_60.adoc index 4cd0923d3..a6c43ddf3 100644 --- a/modules/rn_3_60.adoc +++ b/modules/rn_3_60.adoc @@ -1,3 +1,60 @@ +[[rns-3-607]] + +== Version 3.6.7 + +=== quay / clair / quay-builder + +* link:https://issues.redhat.com/browse/PROJQUAY-3812[PROJQUAY-3812]. [3.6] Failed to create non-existing repository in user account namespace by image pushing + +[[rns-3-606]] + +== Version 3.6.6 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3146[PROJQUAY-3146]. Strange partial deletion of mirrored tags. + +* link:https://issues.redhat.com/browse/PROJQUAY-3404[PROJQUAY-3404]. Build logs page is blank on Super User Admin panel. + +* link:https://issues.redhat.com/browse/PROJQUAY-3405[PROJQUAY-3405]. Build "copy Logs" doesn't work. + +* link:https://issues.redhat.com/browse/PROJQUAY-3638[PROJQUAY-3638]. Quay config validator crashes on 3.6.5 startup. 
+ +[[rns-3-605]] + +== Version 3.6.5 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2983[PROJQUAY-2983]. Config validation fails if no AWS access keys are provided ver. 2. + +* link:https://issues.redhat.com/browse/PROJQUAY-3437[PROJQUAY-3437]. CVE-2022-24761 quay-registry-container: waitress: Inconsistent Interpretation of HTTP Requests ('HTTP Request Smuggling'). + +Added/Changed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3421[PROJQUAY-3421]. Bump Clair to 4.4. + +=== quay-operator + +Added/Changed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3444[PROJQUAY-3444]. Adds subscription annotation to CSVs. + +[[rn-3-604]] + +== Version 3.6.4 + +=== quay-operator + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3317[PROJQUAY-3317]. Quay 3.6.3 APP POD was crashed when use unmanaged tls component. + + [[rn-3-603]] == Version 3.6.3 @@ -35,7 +92,7 @@ Fixed: * link:https://issues.redhat.com/browse/PROJQUAY-2696[PROJQUAY-2696]. Quay 3.6.0 Operator should block the deployment when route is managed. TLS is unmanaged without providing TLS Cert/Key pair. -* link:https://issues.redhat.com/browse/PROJQUAY-2335[PROJQUAY-2335]. Quay Operator should block the depoyment when Route is managed, TLS is unamanged without providing TLS Cert/key pairs. +* link:https://issues.redhat.com/browse/PROJQUAY-2335[PROJQUAY-2335]. Quay Operator should block the deployment when Route is managed, TLS is unmanaged without providing TLS Cert/key pairs. * link:https://issues.redhat.com/browse/PROJQUAY-2067[PROJQUAY-2067]. Operator 3.5.1 fails to check Route API on OpenShift Container Platform 4.8. @@ -148,7 +205,7 @@ For more information, see https://issues.redhat.com/browse/PROJQUAY-1417[PROQUAY * You can now use the API to create a first user. (link:https://issues.redhat.com/browse/PROJQUAY-1926[PROJQUAY-1926]) -* Support for nested repositories and extended repository names has been added. This change allows the use of `/` in repository names needed for certain OpenShift Container Platform use cases. (link:https://issues.redhat.com/browse/PROJQUAY-1535[PROJQUAY-1535]) +* Support for nested repositories and extended repository names has been added. This change allows the use of `/` in repository names needed for certain {ocp} use cases. (link:https://issues.redhat.com/browse/PROJQUAY-1535[PROJQUAY-1535]) * Registry users now have the option to set `CREATE_PRIVATE_REPO_ON_PUSH` in their config.yaml to `True` or `False` depending on their security needs. (link:https://issues.redhat.com/browse/PROJQUAY-1929[PROJQUAY-1929]) @@ -176,9 +233,9 @@ For more information, see link:https://issues.redhat.com/browse/PROJQUAY-2102?fi + For more information, see link:https://issues.redhat.com/browse/PROJQUAY-1693[PROJQUAY-1963]. -* Red Hat Enterprise Linux (RHEL) 8 is strongly recommended for highly available, production quality deployments of Red Hat Quay 3.6. RHEL 7 has not been tested with Red Hat Quay 3.6, and will be deprecated in a future release. +* Red Hat Enterprise Linux (RHEL) 8 is strongly recommended for highly available, production quality deployments of {productname} 3.6. RHEL 7 has not been tested with {productname} 3.6, and will be deprecated in a future release. -* Podman is strongly recommended for highly available, production quality deployments of Red Hat Quay 3.6. Docker has not been tested with Red Hat Quay 3.6, and will be deprecated in a future release. 
+* Podman is strongly recommended for highly available, production quality deployments of {productname} 3.6. Docker has not been tested with {productname} 3.6, and will be deprecated in a future release. Fixed: @@ -220,7 +277,7 @@ Fixed: New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. -Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of major functionality deprecated and remove with {productname} 3.6, refer to the table below. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to the table below. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. //This will eventually expand to cover the latest three releases. Since this is the first TP tracker, it will include only 3.6. @@ -244,15 +301,15 @@ Some features available in previous releases have been deprecated or removed. De ==== Deprecated features -* *FEATURE_HELM_OCI_SUPPORT*: This option has been deprecated and will be removed in a future version of {productname}. In {productname} {producty}, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support. (link:https://issues.redhat.com/browse/PROJQUAY-2334[PROJQUAY-2334]) +* *FEATURE_HELM_OCI_SUPPORT*: This option has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support. (link:https://issues.redhat.com/browse/PROJQUAY-2334[PROJQUAY-2334]) * *MySQL and MariaDB database support*: The MySQL and mariaDB databases have been deprecated as of {productname} 3.6. Support for these databases will be removed in a future version of {productname}. If starting a new {productname} installation, it is strongly recommended to use PostgreSQL. (link:https://issues.redhat.com/browse/PROJQUAY-1998[PROJQUAY-1998]) ==== Technology preview features -* *Java scanning with Clair*: With {product-title} 3.6, Clair 4.2 include support for Java scanning. Java scanning is dependent on an external service (CRDA) to gather vulnerability data. Because Clair is using a shared default token to access the CRDA service, it might encounter rate limiting if too many requests are made in a short period of time. Because of this, Clair might miss certain vulnerabilities, for example, log4j. +* *Java scanning with Clair*: With {productname} 3.6, Clair 4.2 includes support for Java scanning. Java scanning is dependent on an external service (CRDA) to gather vulnerability data. 
Because Clair is using a shared default token to access the CRDA service, it might encounter rate limiting if too many requests are made in a short period of time. Because of this, Clair might miss certain vulnerabilities, for example, log4j. + -Customers can obtain and use their own token for CRDA which might help avoid the occurrence of rate limiting by submitting link:https://docs.google.com/forms/d/e/1FAIpQLSfJ1aoOfi0tQ6_o0srEbYL-cSwzo_MyDJQnwcFG9IYVlNXJrA/viewform[this form]. Because of these issues, Java scanning for Clair is considered Technical Preview and will be enhanced in future Quay updates. +Customers can obtain and use their own token for CRDA which might help avoid the occurrence of rate limiting by submitting link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form]. Because of these issues, Java scanning for Clair is considered Technical Preview and will be enhanced in future Quay updates. === quay-operator diff --git a/modules/rn_3_70.adoc b/modules/rn_3_70.adoc index b4a0217d0..07ce6e1e3 100644 --- a/modules/rn_3_70.adoc +++ b/modules/rn_3_70.adoc @@ -1,3 +1,163 @@ +[[rn-3-708]] +== RHBA-2022:6353 - {productname} 3.7.8 bug fix update + +Issued: 2022-09-12 + +{productname} release 3.7.8 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:6353[RHBA-2022:6353] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4222[PROJQUAY-4222]. Quay can't connect to MySQL backed by SSL certificate. + +* link:https://issues.redhat.com/browse/PROJQUAY-4362[PROJQUAY-4362]. Proxy authentication fails when the upstream registry doesn't return the correct www-authenticate header. + +[[rn-3-707]] +== RHBA-2022:6154 - {productname} 3.7.7 bug fix update + +Issued: 2022-08-31 + +{productname} release 3.7.7 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:6154[RHBA-2022:6154] advisory. + +=== New features + +* With this update, the `REPO_MIRROR_ROLLBACK` configuration field has been added. When this field is set to `true`, the repository rolls back after a failed mirror attempt. By default, this field is set to `false`. + +=== Bug fixes + +* Previously, users could only mirror and replicate the entirety of their upstream repository. When complex expressions for tag discovery were used, a list of several tags to be mirrored was created. If the mirroring process failed for any tag failed at any point during the replication procedure, {productname} would revert the repository to its previous state. If the mirrored repository was empty, all tags that were correctly mirrored were deleted. For example, if you mirrored 10 tags, and 8 tags were mirrored successfully, but 2 failed, all of the successful tags would be deleted from the repository because of the 2 that failed. ++ +With this update, if a mirroring operation fails, it will no longer roll back the state of the repository. Instead, it will log the images that failed to properly mirror. ++ +For users who want their repository rolled back upon failure, the `REPO_MIRROR_ROLLBACK` feature has been added. When the feature is set to `true`, the repository rolls back after a failed mirror attempt. By default, the feature is set to `false`. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4296[PROJQUAY-4296] and link:https://issues.redhat.com/browse/PROJQUAY-4357[PROJQUAY-4357]. 
+ +* link:https://issues.redhat.com/browse/PROJQUAY-4322[PROJQUAY-4322]. The image mirrored unsuccessfully can be pulled successfully. + +* link:https://issues.redhat.com/browse/PROJQUAY-3976[PROJQUAY-3976]. Pull-thru gives 500 when pulling certain images. + +[[rn-3-706]] +== RHBA-2022:5999 - {productname} 3.7.6 bug fix update + +Issued: 2022-08-15 + +{productname} release 3.7.6 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:5999[RHBA-2022:5999] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4227[PROJQUAY-4277]. Supported NGINX version in Quay's container. + +* link:https://issues.redhat.com/browse/PROJQUAY-2897[PROJQUAY-2897]. Ability to add annotations and labels to Quay development when using the Operator. + +* link:https://issues.redhat.com/browse/PROJQUAY-3743[PROJQUAY-3743]. Pull-thru proxy repository auto-creation should respect CREATE_PRIVATE_REPO_ON_PUSH config. + +* link:https://issues.redhat.com/browse/PROJQUAY-4229[PROJQUAY-4229]. Quay 3.7.5 images high vulnerability reported by Redhat ACS. + +* link:https://issues.redhat.com/browse/PROJQUAY-4254[PROJQUAY-4254]. Cannot cache (pull-thru) OCI image index. + + + +[[rn-3-705]] +== RHBA-2022:5727 - {productname} 3.7.5 bug fix update + +Issued: 2022-08-2 + +{productname} release 3.7.5 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:5727[RHBA-2022:5727] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-3982[PROJQUAY-3982]. Tags reverted after mirroring. + +* link:https://issues.redhat.com/browse/PROJQUAY-1569[PROJQUAY-1569]. Provide support for pod anti affinity for Quay Operator. + +* link:https://issues.redhat.com/browse/PROJQUAY-4148[PROJQUAY-4148]. Add RS384 support for OIDC flow. + +* link:https://issues.redhat.com/browse/PROJQUAY-1603[PROJQUAY-1603]. Container-security-operator does not take pull secrets of OpenShift into account. + +* link:https://issues.redhat.com/browse/PROJQUAY-2153[PROJQUAY-2153]. Allow CSO to define proxy variables. + + +[[rn-3-704]] +== RHBA-2022:5559 - {productname} 3.7.4 bug fix update + +Issued: 2022-07-18 + +{productname} release 3.7.4 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:5559[RHBA-2022:5559] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-3145[PROJQUAY-3145]. Usage logs error out with a 500 when repo mirroring is run with DEBUGLOG=true. + +* link:https://issues.redhat.com/browse/PROJQUAY-3819[PROJQUAY-3819]. Allow Builders to Use Self Signed Certificates. + +* link:https://issues.redhat.com/browse/PROJQUAY-4016[PROJQUAY-4016]. PrometheusRule is not being parsed correctly. + +* link:https://issues.redhat.com/browse/PROJQUAY-2659[PROJQUAY-2649]. Quay 3.6.0 Clair APP POD was failed to rolling update caused by PSQL error "FATAL: sorry, too many clients already". + +[[rn-3-703]] +== Version 3.7.3 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3965[PROJQUAY-3965]. Basic cosign signature visualization. + +* link:https://issues.redhat.com/browse/PROJQUAY-3981[PROJQUAY-3981]. Unable to navigate on level up in repo-view. + +* link:https://issues.redhat.com/browse/PROJQUAY-3999[PROJQUAY-3999]. Pushing big layers to Quay deployed on Azure OpenShift Cluster results in a 413. 
+ +* link:https://issues.redhat.com/browse/PROJQUAY-3979[PROJQUAY-3979]. Quay 3.7.2 Postgres image vulnerability reported by Redhat ACS. + + +[[rn-3-702]] +== Version 3.7.2 + +=== quay / clair / quay-builder + +* link:https://issues.redhat.com/browse/PROJQUAY-3901[PROJQUAY-3901]. Clair 4.4.2 failed to fetch image layer from quay when image was from dockerhub. + +* link:https://issues.redhat.com/browse/PROJQUAY-3905[PROJQUAY-3905]. Quay 3.7.1 can't reconfig quota to replace system quota for super user account. + +* link:https://issues.redhat.com/browse/PROJQUAY-3802[PROJQUAY-3802]. Quay 3.7.0 image vulnerability reported by Redhat ACS. + +* link:https://issues.redhat.com/browse/PROJQUAY-1605[PROJQUAY-1605]. Quay 3.4 SMTP validation fails. + +* link:https://issues.redhat.com/browse/PROJQUAY-3879[PROJQUAY-3879]. The Quay Config Tool is not validating configurations for Github Enterprise Login. + +* link:https://issues.redhat.com/browse/PROJQUAY-3948[PROJQUAY-3948]. Show how to pull an image with podman. + +* link:https://issues.redhat.com/browse/PROJQUAY-3767[PROJQUAY-3767]. Quay 3.7.0 can't reconfig Quota to replace system default quota for user account. + +* link:https://issues.redhat.com/browse/PROJQUAY-3806[PROJQUAY-3806]. Cannot pull from proxy org as non-admin member. + +* link:https://issues.redhat.com/browse/PROJQUAY-3889[PROJQUAY-3889]. Quay quota consumption is not decreased in org level and image repo level after deleted image tags. + +* link:https://issues.redhat.com/browse/PROJQUAY-3920[PROJQUAY-3920]. Quay 3.7.1 can't config quota for normal user accounts by super user. + +* link:https://issues.redhat.com/browse/PROJQUAY-3614[PROJQUAY-3614]. The 'build successfully completed' does not send out notification by email, slack and UI notification. + + +[[rn-3-701]] +== Version 3.7.1 + +=== quay / clair / quay-builder + +* link:https://issues.redhat.com/browse/PROJQUAY-3841[PROJQUAY-3841]. Standalone UI Version is incorrect. + +* link:https://issues.redhat.com/browse/PROJQUAY-2346[PROJQUAY-2346]. Pushing failure of first attempt to create non-existing org or repository by skopeo and podman. + +* link:https://issues.redhat.com/browse/PROJQUAY-3701[PROJQUAY-3701]. Quay 3.7.0 API update default quota should not return 500 internal error. + +* link:https://issues.redhat.com/browse/PROJQUAY-3815[PROJQUAY-3815]. Custom Quota Warning Notification. + +* link:https://issues.redhat.com/browse/PROJQUAY-3818[PROJQUAY-3818]. pull-thru gives 500 when manifest list's sub-manifest is already proxied under different tag in same repo. + +* link:https://issues.redhat.com/browse/PROJQUAY-3828[PROJQUAY-3828]. Quay 3.7.0 quota consumption is not correct in image repo level when removed all tags. + +* link:https://issues.redhat.com/browse/PROJQUAY-3881[PROJQUAY-3881]. cert_install.sh script incorrectly parses certificates in certain situations. + + [[rn-3-700]] == Version 3.7.0 @@ -5,25 +165,40 @@ Added/Changed: +* Image APIs are now deprecated. Users should move to manifest-based APIs. (link:https://issues.redhat.com/browse/PROJQUAY-3418[PROJQUAY-3418]) + * With {productname} 3.7, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. With this feature, organizations can easily avoid exceeding storage limitations by rejecting pulls at a specified limit. 
(link:https://issues.redhat.com/browse/PROJQUAY-302[PROJQUAY-302], link:https://issues.redhat.com/browse/PROJQUAY-253[PROJQUAY-253]) * The bare-metal constraint required to run builds has been removed by adding an additional build option which does not contain the virtual machine layer. As a result, builds can be run on virtualized platforms. Backwards compatibility to run previous build configurations are also available. (link:https://issues.redhat.com/browse/PROJQUAY-295[PROJQUAY-295]) * {productname} can now act as a proxy cache to mitigate pull-rate limitations from upstream registries. This feature also accelerates pull performance, because images are pulled from the cache rather than upstream dependencies. Cached images are only updated when the upstream image digest differs from the cached image, reducing rate limitations and potential throttling. (link:https://issues.redhat.com/browse/PROJQUAY-465[PROJQUAY-465]) -* Support for Microsoft Azure Government (MAG) has been added. This optional feature allows government agencies and public sector customers to select and specify a MAG endpoint. (link:https://issues.redhat.com/browse/PROJQUAY-891[PROJQUAY-891]) +* Support for Microsoft Azure Government (MAG) has been added. This optional feature allows government agencies and public sector customers to select and specify a MAG endpoint in their Azure storage yaml. (link:https://issues.redhat.com/browse/PROJQUAY-891[PROJQUAY-891]) + +* Introduced in {productname} 3.6, Java scanning for Clair 4.2, which requires CRDA, included a default shared CRDA key and was enabled by default. Additionally, the default CRDA configuration supported low RPS. With {productname} 3.7, Java scanning no longer includes a default CRDA shared key, and is no longer enabled by default. Users must now manually enable CRDA for scan results, and enable it in Clair's configuration. To enable CRDA, see https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#clair_crda_configuration[Clair CRDA configuration]. ++ +[NOTE] +==== +This feature is currently denoted as `Technology Preview`. +==== + +* {productname} now accepts unsigned images. This feature can be enabled under an organization's *Repository Mirroring* page. (link:https://issues.redhat.com/browse/PROJQUAY-3106[PROJQUAY-3106]) Known issues: * link:https://issues.redhat.com/browse/PROJQUAY-3590[PROJQUAY-3590]. Quay 3.7.0 pull from cache should return quota exceeded error rather than general 403 error code. +* link:https://issues.redhat.com/browse/PROJQUAY-3767[PROJQUAY-3767]. Quota for _user_ accounts cannot be reconfigured using the {productname} UI. + Fixed: -=== {productname} feature tracker +* link:https://issues.redhat.com/browse/PROJQUAY-3648[PROJQUAY-3648]. OAuth2 code flow: Missing state parameters when user is asked to authorize. -New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. +* link:https://issues.redhat.com/browse/PROJQUAY-2495[PROJQUAY-2495]. Gitlab validation fails on Quay 3.5.6. -Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of major functionality deprecated and remove with {productname} 3.7, refer to the table below. 
Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. +* link:https://issues.redhat.com/browse/PROJQUAY-2560[PROJQUAY-2560]. The Quay Config Tool does not validate configurations for GitHub Enterprise login. + +* link:https://issues.redhat.com/browse/PROJQUAY-3656[PROJQUAY-3656]. Could not verify GitHub OAuth credentials. === quay-operator @@ -34,36 +209,73 @@ Added/Changed: ** Configuration of Clair's updater set through the Quay Operator. ** Configuration of the database connection string through the Quay Operator. ** Configuration of custom certificates into the Clair deployment, which allows support of internal HTTPS proxies. -** Override, or re-size, the size and storage class of the Postgres Database PVC. ** Support for alternative fully qualified domain names (FQDN) for Clair that can leverage a global load balancing mechanism fronting different clusters running Clair. + For more information, see link:https://issues.redhat.com/browse/PROJQUAY-2110[PROJQUAY-2110]. * With advanced Clair configuration, users can also provide a custom Clair configuration for an unmanaged Clair database on the {productname} Operator. An unmanaged Clair database allows the {productname} Operator to work in a Geo-Replicated environment, where multiple instances of the Operator must communicate with the same database. An unmanaged Clair database can also be used when a user requires a highly-available (HA) Clair database that exists outside of a cluster. (link:https://issues.redhat.com/browse/PROJQUAY-1696[PROJQUAY-1696]) -* Geo-replication is now available with the Red Hat Quay Operator. This feature allows multiple, geographically distributed Quay deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed Quay setup. Image data is asynchronously replicated in the background with transparent failover / redirect for clients. (link:https://issues.redhat.com/browse/PROJQUAY-2504[PROJQUAY-2504]) +* Geo-replication is now available with the {productname} Operator. This feature allows multiple, geographically distributed Quay deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed Quay setup. Image data is asynchronously replicated in the background with transparent failover / redirect for clients. (link:https://issues.redhat.com/browse/PROJQUAY-2504[PROJQUAY-2504]) + +* With {productname} 3.7, reconfiguring Quay through the UI no longer generates a new login password. The password is now generated only once and remains the same after `QuayRegistry` objects are reconciled. (link:https://issues.redhat.com/browse/PROJQUAY-3318[PROJQUAY-3318]) + +=== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to the table below. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. 
+//This will eventually expand to cover the latest three releases. Since this is the first TP tracker, it will include only 3.6. +.Technology Preview tracker +[cols="4,1,1",options="header"] +|=== +|Feature |Quay 3.7 |Quay 3.6 -Known issues: +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[Quota management and enforcement] +|General Availability +|- -Fixed: -=== quay-container-security-operator +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-builders-enhancement[{productname} build enhancements] +|General Availability +|- -Added/Changed: +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries] +|Technology Preview +|- -Known issues: +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index[Geo-replication - {productname} Operator] +|General Availability +|- -Fixed: -=== quay-openshift-bridge-operators +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#unmanaged_clair_configuration[Advanced Clair configuration] +|General Availability +|- -Added/Changed: +|Support for Microsoft Azure Government (MAG) +|General Availability +|- -Known issues: +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-helm-oci[FEATURE_HELM_OCI_SUPPORT] +|Deprecated +|Deprecated -Fixed: +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-ui-database[MySQL and MariaDB database support] +|Deprecated +|Deprecated + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#oci-intro[Open Container Initiative (OCI) media types] +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#clair_crda_configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview + +|Image APIs +|Deprecated +|General Availability +|=== diff --git a/modules/rn_3_70_draft.adoc b/modules/rn_3_70_draft.adoc deleted file mode 100644 index c9172f800..000000000 --- a/modules/rn_3_70_draft.adoc +++ /dev/null @@ -1,97 +0,0 @@ -[[rn-3-700]] -== Version 3.7.0 - -// PROJQUAY-302 -=== Quota - -* link:https://issues.redhat.com/browse/PROJQUAY-302[PROJQUAY-302]. Quota Management and Enforcements - - -==== Details - -* link:https://issues.redhat.com/browse/PROJQUAY-2936[ROJQUAY-2936]. Reporting API and Schema - -* link:https://issues.redhat.com/browse/PROJQUAY-2937[PROJQUAY-2937]. Repository Soft/Hard Limit Implementation - - -// PROJQUAY-465 -=== Proxy - -* link:https://issues.redhat.com/browse/PROJQUAY-465[PROJQUAY-465]. Quay as a cache proxy / pull-through cache for other registries - -==== Design - -* link:https://issues.redhat.com/browse/PROJQUAY-2888[PROJQUAY-2888]. Quay as a cache proxy - Design: Proxy org creation and configuration - -* link:https://issues.redhat.com/browse/PROJQUAY-2889[PROJQUAY-2889]. Quay as a cache proxy - Design: Upstream image pull flow - -* link:https://issues.redhat.com/browse/PROJQUAY-2890[PROJQUAY-2890]. 
Quay as a cache proxy - Design: Local storage of upstream image layers and manifests - -* link:https://issues.redhat.com/browse/PROJQUAY-2891[PROJQUAY-2891]. Quay as a cache proxy - Design: Quota Management in Cache Proxy org Spike - -==== User stories - -* link:https://issues.redhat.com/browse/PROJQUAY-3029[PROJQUAY-3029]. As a Quay user, I want to create and configure pull-through proxy orgs via UI - -* link:https://issues.redhat.com/browse/PROJQUAY-3030[PROJQUAY-3030]. As a Quay user I want to be able to proxy images through Quay orgs - -* link:https://issues.redhat.com/browse/PROJQUAY-3033[PROJQUAY-3033]. As a Quay user I want proxied images to be stored in Quay so that my pulls are faster - - - - -=== Geo-replication - -* link:https://issues.redhat.com/browse/PROJQUAY-2504[PROJQUAY-2504]. Quay Operator supports geo-replication - - -==== Details - -* link:https://issues.redhat.com/browse/PROJQUAY-3055[PROJQUAY-3055]. Environment variables override -* link:https://issues.redhat.com/browse/PROJQUAY-3056[PROJQUAY-3056]. Scale down quay, clair and mirror -* link:https://issues.redhat.com/browse/PROJQUAY-1723[PROJQUAY-1723]. Multi-cluster deployment of Quay on OpenShift along with all components - -//// - -=== quay / clair / quay-builder - -Added/Changed: - - - -Fixed: - - - -=== quay-operator - -Added/Changed: - - -Fixed: - - - -=== {productname} feature tracker - -New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. - -Some features available in previously releases have been deprecated or remove. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of major functionality deprecated and remove with {productname} 3.7, refer to the table below. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. - -//This will eventually expand to cover the latest three releases. Since this is the first TP tracker, it will include only 3.6. - -.Technology Preview tracker -[cols="2a,2a",options="header"] -|=== -|Feature |Quay 3.7 - - -|=== - -==== Deprecated features - - - -==== Technology preview features - -//// diff --git a/modules/rn_3_80.adoc b/modules/rn_3_80.adoc new file mode 100644 index 000000000..d9632c116 --- /dev/null +++ b/modules/rn_3_80.adoc @@ -0,0 +1,453 @@ +:_content-type: CONCEPT + +[id="rn-3-804"] += RHBA-2023:1188 - {productname} 3.8.4 bug fix update + +Issued 2023-3-14 + +{productname} release 3.8.4 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:1188[RHBA-2023:1188] advisory. + +[id="bug-fixes-384"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-5074[PROJQUAY-5074]. Health checks should check storage engines. +* link:https://issues.redhat.com/browse/PROJQUAY-5117[PROJQUAY-5117]. Quay calls LDAP on robot account login. + +[id="rn-3-803"] += RHBA-2023:0906 - {productname} 3.8.3 bug fix update + +Issued 2023-2-27 + +{productname} release 3.8.3 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:0906[RHBA-2023:0906] advisory. + +[id="bug-fixes-383"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-3643[PROJQUAY-3643]. 
CVE-2022-24863 quay-registry-container: http-swagger: a denial of service attack consisting of memory exhaustion on the host system [quay-3.7] + +[id="rn-3-802"] += RHBA-2023:0789 - {productname} 3.8.2 bug fix update + +Issued 2023-2-15 + +{productname} release 3.8.2 is now available with Clair 4.6.0. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:0789[RHBA-2023:0789] advisory. + +[id="bug-fixes-382"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4395[PROJQUAY-4395]. Default value of `false` for `CLEAN_BLOB_UPLOAD_FOLDER` does not make sense. +* link:https://issues.redhat.com/browse/PROJQUAY-4726[PROJQUAY-4726]. No audit logs when a superuser triggers and cancels builds under a normal user's namespace with superuser full access enabled. +* link:https://issues.redhat.com/browse/PROJQUAY-4992[PROJQUAY-4992]. Clean up deprecated appr code. + +[id="rn-3-801"] += RHBA-2023:0044 - {productname} 3.8.1 bug fix update + +Issued 2023-1-24 + +{productname} release 3.8.1 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:0044[RHBA-2023:0044] advisory. + +[id="bug-fixes-381"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-2164[PROJQUAY-2164]. Combined URLs in security scan report (pointing to errata URL). +* link:https://issues.redhat.com/browse/PROJQUAY-4667[PROJQUAY-4667]. Web UI - viewing account results in error. +* link:https://issues.redhat.com/browse/PROJQUAY-4800[PROJQUAY-4800]. Add PUT method to CORS method list. +* link:https://issues.redhat.com/browse/PROJQUAY-4827[PROJQUAY-4827]. Add tracking and cookie content when domain contains Quay.io. +* link:https://issues.redhat.com/browse/PROJQUAY-4527[PROJQUAY-4527]. New UI toggle cannot switch back from new UI to current UI on Apple Safari. +* link:https://issues.redhat.com/browse/PROJQUAY-4663[PROJQUAY-4663]. Pagination for the delete repository modal does not show correct values. +* link:https://issues.redhat.com/browse/PROJQUAY-4765[PROJQUAY-4765]. Quay 3.8.0 superuser does not have permission to add a new team member to a normal user's team when superuser full access is enabled. + +[id="rn-3-800"] += RHBA-2022:6976 - {productname} 3.8.0 release + +Issued 2022-12-6 + +{productname} release 3.8.0 is now available with Clair 4.5.1. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:6976[RHBA-2022:6976] advisory. + +[id="new-features-and-enhancements-38"] +== {productname}, Clair, and Quay Builder new features and enhancements + +The following updates have been made to {productname}, Clair, and Quay Builders: + +* Previously, {productname} only supported the IPv4 protocol family. IPv6 support is now available in {productname} {producty} standalone deployments. Additionally, dual-stack (IPv4/IPv6) support is available. ++ +.Network protocol support +[cols="2,1,1",options="header"] +|=============================================================== +| Protocol family | {productname} 3.7 | {productname} 3.8 +| IPv4 | ✓ | ✓ +| IPv6 | | ✓ +| Dual-stack (IPv4/IPv6) | | ✓ + +|=============================================================== ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-272[PROJQUAY-272]. ++ +For a list of known limitations, see xref:ipv6-limitations-38[IPv6 and dual-stack limitations]. 
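++
+As a quick orientation, IPv6 support in a standalone deployment is driven by a single `config.yaml` field, which is described with the other new configuration fields below. A minimal sketch, assuming an otherwise complete standalone configuration:
++
+[source,yaml]
+----
+# Serve the registry over IPv6 only; IPv4 remains the default
+FEATURE_LISTEN_IP_VERSION: IPv6
+----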
+ +* Previously, {productname} did not require self-signed certificates to use Subject Alternative Names (SANs). {productname} users could temporarily enable Common Name matching with `GODEBUG=x509ignoreCN=0` to bypass the required certificate. ++ +With {productname} 3.8, {productname} has been upgraded to use Go version 1.17. As a result, setting `GODEBUG=x509ignoreCN=0` no longer works, and self-signed certificates must now include SANs. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-1605[PROJQUAY-1605]. + +* The following enhancements have been made to the {productname} proxy cache feature: + +** Previously, the cache of a proxy organization with quota management enabled could reach full capacity. As a result, pulls for new images could be prevented until an administrator cleaned up the cached images. ++ +With this update, {productname} administrators can now use the storage quota of an organization to limit the cache size. Limiting the cache size ensures that backend storage consumption remains predictable by discarding images from the cache according to the pull frequency or overall usage of an image. As a result, the storage size allotted by quota management always stays within its limits. ++ +For more information, see https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/use_red_hat_quay#proxy-cache-leveraging-storage-quota-limits[Leveraging storage quota limits in proxy organizations]. + +** Previously, when mirroring a repository, an image with the `latest` tag had to exist in the remote repository. This requirement has been removed. Now, an image with the `latest` tag is no longer required, and you do not need to specify an existing tag explicitly. ++ +For more information on this update, see link:https://issues.redhat.com/browse/PROJQUAY-2179[PROJQUAY-2179]. ++ +For more information on tag patterns, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html/manage_red_hat_quay/repo-mirroring-in-red-hat-quay#mirroring-tag-patterns[Mirroring tag patterns]. + +* {productname} 3.8 now includes support for the following Open Container Initiative (OCI) image media types: +** Software Package Data Exchange (SPDX) +** Syft +** CycloneDX ++ +Users can configure these in their `config.yaml` file, for example: ++ +.config.yaml +[source,yaml] +---- +... +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.syft+json + application/vnd.cyclonedx + application/vnd.cyclonedx+xml + application/vnd.cyclonedx+json + application/vnd.in-toto+json +... +---- ++ +[NOTE] +==== +When adding OCI media types that are not configured by default, users will also need to manually add support for cosign and Helm if desired. The zstd compression scheme is supported by default, so users will not need to add that OCI media type to their `config.yaml` file to enable support. +==== + +== New {productname} configuration fields + +* The following configuration field has been added to test {productname}'s new user interface: + +** **FEATURE_UI_V2**: With this configuration field, users can test the beta UI environment. ++ +*Default*: `False` ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[v2 user interface configuration]. 
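++
+A minimal sketch of trying the field, assuming an otherwise complete `config.yaml`:
++
+[source,yaml]
+----
+# Opt in to the beta (v2) UI for this deployment
+FEATURE_UI_V2: true
+----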
+ +* The following configuration fields have been added to enhance the {productname} registry: + +** **FEATURE_LISTEN_IP_VERSION**: This configuration field allows users to set the protocol family to IPv4, IPv6, or dual-stack. This configuration field must be set properly; otherwise, {productname} fails to start. ++ +*Default*: `IPv4` ++ +*Additional configurations*: `IPv6`, `dual-stack` ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#config-fields-ipv6[IPv6 configuration field]. + +* The following configuration fields have been added to enhance Lightweight Directory Access Protocol (LDAP) deployments: + +** **LDAP_SUPERUSER_FILTER**: This configuration field is a subset of the `LDAP_USER_FILTER` configuration field. It allows {productname} administrators to configure LDAP users as superusers when {productname} uses LDAP as its authentication provider. ++ +With this field, administrators can add or remove superusers without having to update the {productname} configuration file and restart their deployment. ++ +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-ldap-super-user[LDAP superuser configuration reference]. + +** **LDAP_RESTRICTED_USER_FILTER**: This configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, it allows {productname} administrators to configure LDAP users as restricted users when {productname} uses LDAP as its authentication provider. ++ +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-ldap-restricted-user[LDAP restricted user configuration]. + +* The following configuration fields have been added to enhance the superuser role: + +** **FEATURE_SUPERUSERS_FULL_ACCESS**: This configuration field grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-superusers-full-access[FEATURE_SUPERUSERS_FULL_ACCESS configuration reference]. + +** **GLOBAL_READONLY_SUPER_USERS**: This configuration field grants users in this list read access to all repositories, regardless of whether they are public repositories. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-global-readonly-super-users[GLOBAL_READONLY_SUPER_USERS configuration reference]. ++ +[NOTE] +==== +In its current state, this feature only allows designated users to pull content from all repositories. Administrative restrictions will be added in a future version of {productname}. +==== + +* The following configuration fields have been added to enhance user permissions: + +** **FEATURE_RESTRICTED_USERS**: When set with `RESTRICTED_USERS_WHITELIST`, restricted users cannot create organizations or content in their own namespace. 
Normal permissions apply for an organization's membership; for example, a restricted user still has normal permissions in organizations based on the teams that they are members of. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-feature-restricted-users[FEATURE_RESTRICTED_USERS configuration reference]. + +** **RESTRICTED_USERS_WHITELIST**: When set with `FEATURE_RESTRICTED_USERS: true`, administrators can exclude users from the `FEATURE_RESTRICTED_USERS` setting. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-restricted-users-whitelist[RESTRICTED_USERS_WHITELIST configuration reference]. + +[id="quay-operator-updates"] +== {productname} Operator + +The following updates have been made to the {productname} Operator: + +* Previously, the {productname} Operator only supported the IPv4 protocol family. IPv6 support is now available in {productname} {producty} Operator deployments. ++ +.Network protocol support +[cols="1,1,1",options="header"] +|=============================================================== +| Protocol family | {productname} 3.7 Operator | {productname} 3.8 Operator +| IPv4 | ✓ | ✓ +| IPv6 | | ✓ +| Dual-stack (IPv4/IPv6) | | + +|=============================================================== ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-272[PROJQUAY-272]. ++ +For a list of known limitations, see xref:ipv6-limitations-38[IPv6 and dual-stack limitations]. + +[id="known-issues-and-limitations-38"] +== {productname} 3.8 known issues and limitations + +[id="known-issues-38"] +=== Known issues + +* The `metadata_json` column in the `logentry3` table on MySQL deployments is limited to the `TEXT` type, which holds a maximum of 65535 bytes. This is not big enough for some mirror logs when debugging is turned `off`. When a statement larger than 65535 bytes is sent to MySQL, the data is truncated to fit within that boundary. Consequently, decoding the `metadata_json` object fails because the string is not terminated properly, and {productname} returns a 500 error. ++ +There is currently no workaround for this issue, and it will be addressed in a future version of {productname}. For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4305[PROJQUAY-4305]. + +* There is a known issue when using the `--sign-by-sigstore-private-key` flag with some versions of Podman v4.y.z or greater. When the flag is used, the following error is returned: `Error: writing signatures: writing sigstore attachments is disabled by configuration`. To use this flag with Podman v4, your version must be v4.2.1 or later; versions prior to v4.2.1 return the aforementioned error. There is currently no workaround for this issue, and it will be addressed in a future version of Podman. + +* Currently, when pushing images with the Cosign private key `sigstore` with Podman 4, the following error is returned: `Error: received unexpected HTTP status: 500 Internal Server Error`. This is a known issue and will be fixed in a future version of Podman. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4588[PROJQUAY-4588]. 
+ +* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} UI v2. When this field is set, all superuser actions on tenant content should be audited. Currently, when a superuser deletes an existing organization that is owned by a normal user, there is no way to audit that operation. This will be fixed in a future version of {productname}. + +* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} UI v2. When setting this field to `true` in your `config.yaml` file, {productname} superusers can view organizations created by normal users, but cannot see the image repository. As a temporary workaround, superusers can view those repositories by navigating to them from the *Organizations* page. This will be fixed in a future version of {productname}. + +* When setting the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field to `true`, superusers do not have permission to create a new image repository under a normal user's organization. This is a known issue and will be fixed in a future version of {productname}. + +* When running {productname} in the old UI, timed-out sessions would require that a superuser input their password again in the pop-up window. With the new UI, superusers are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. + +* When `FEATURE_RESTRICTED_USERS` is set to `true`, superusers are unable to create new organizations. This is a known issue and will be fixed in a future version of {productname}. + +* If `FEATURE_RESTRICTED_USERS` or `LDAP_RESTRICTED_USER_FILTER` is set with a user, for example, `user1`, and the same user is also a superuser, they will not be able to create new organizations. This is a known issue. The superuser configuration field should take precedence over the restricted user configuration; however, this is also an invalid configuration. {productname} administrators should not set the same user as both a restricted user and a superuser. This will be fixed in a future version of {productname} so that the superuser configuration field takes precedence over the restricted user field. + +* After selecting *Enable Storage Replication* in the {productname} configuration editor and reconfiguring your {productname} deployment, the new `Quay` and `Mirror` pods fail to start. This error occurs because the `Quay` and `Mirror` pods rely on the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable, which is now unsupported in {productname} {producty}. ++ +As a temporary workaround, you must manually update the `QuayRegistry` YAML to include the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable, for example: ++ +[source,yaml] +---- + spec: + components: + - kind: clair + managed: true + - kind: postgres + managed: true + - kind: objectstorage + managed: false + - kind: redis + managed: true + - kind: horizontalpodautoscaler + managed: true + - kind: route + managed: true + - kind: mirror + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: local_us + - kind: monitoring + managed: false + - kind: tls + managed: true + - kind: quay + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: local_us + - kind: clairpostgres + managed: true +---- ++ +This is a known issue and will be fixed in a future version of {productname}. 
+ +* When configuring {productname} AWS S3 Cloudfront, a new parameter, `s3_region` is required. Currently, the {productname} config editor does not include this field. As a temporary workaround, you must manually insert the `s3_region` parameter in your `config.yaml` file, for example: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - CloudFrontedS3Storage + - cloudfront_distribution_domain: + cloudfront_distribution_org_overrides: {} + cloudfront_key_id: > ssl.cert +---- + +. Stop the `Quay` container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop quay +---- + +. Restart the registry by entering the following command: + [subs="verbatim,attributes"] -``` -$ sudo podman rm -f quay +---- + $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ --name=quay \ -v $QUAY/config:/conf/stack:Z \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} - -``` - +---- \ No newline at end of file diff --git a/modules/ssl-config-ui.adoc b/modules/ssl-config-ui.adoc index 60f268f4b..c61ae52b3 100644 --- a/modules/ssl-config-ui.adoc +++ b/modules/ssl-config-ui.adoc @@ -1,25 +1,36 @@ -= Configuring SSL using the UI +:_content-type: PROCEDURE +[id="configuring-ssl-using-ui"] += Configuring SSL/TLS using the {productname} UI -This section configures SSL using the Quay UI. To configure SSL using the command line interface, see the following section. +Use the following procedure to configure SSL/TLS using the {productname} UI. + +To configure SSL using the command line interface, see "Configuring SSL/TLS using the command line interface". + +.Prerequisites + +* You have created a certificate authority and signed the certificate. + +.Procedure . Start the `Quay` container in configuration mode: + [subs="verbatim,attributes"] -``` +---- $ sudo podman run --rm -it --name quay_config -p 80:8080 -p 443:8443 {productrepo}/{quayimage}:{productminv} config secret -``` +---- + +. In the *Server Configuration* section, select *{productname} handles TLS* for SSL/TLS. Upload the certificate file and private key file created earlier, ensuring that the *Server Hostname* matches the value used when the certificates were created. -. In the Server Configuration section, select `Red Hat Quay handles TLS` for TLS. Upload the certificate file and private key file created earlier, ensuring that the Server Hostname matches the value used when creating the certs. Validate and download the updated configuration. +. Validate and download the updated configuration. -. Stop the `Quay` container and then restart the registry: +. Stop the `Quay` container and then restart the registry by entering the following command: + [subs="verbatim,attributes"] -``` +---- $ sudo podman rm -f quay $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ --name=quay \ -v $QUAY/config:/conf/stack:Z \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} - -``` +---- diff --git a/modules/ssl-create-certs.adoc b/modules/ssl-create-certs.adoc index b3fafd0e4..5b8b54671 100644 --- a/modules/ssl-create-certs.adoc +++ b/modules/ssl-create-certs.adoc @@ -1,62 +1,79 @@ -[[create-a-ca-and-sign-a-certificate]] -= Create a Certificate Authority and sign a certificate +:_content-type: PROCEDURE +[id="create-a-ca-and-sign-a-certificate"] += Creating a certificate authority and signing a certificate -At the end of this procedure, you will have a certificate file and a primary key file named `ssl.cert` and `ssl.key`, respectively. 
+Use the following procedures to create a certificate file and a private key file named `ssl.cert` and `ssl.key`. -== Create a Certificate Authority +[id="creating-a-certificate-authority"] +== Creating a certificate authority -. Generate the root CA key: +Use the following procedure to create a certificate authority (CA). + +.Procedure + +. Generate the root CA key by entering the following command: + -``` +[source,terminal] +---- $ openssl genrsa -out rootCA.key 2048 -``` +---- -. Generate the root CA cert: +. Generate the root CA certificate by entering the following command: + -``` +[source,terminal] +---- $ openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 1024 -out rootCA.pem -``` +---- . Enter the information that will be incorporated into your certificate request, including the server hostname, for example: + -``` +[source,terminal] +---- Country Name (2 letter code) [XX]:IE State or Province Name (full name) []:GALWAY Locality Name (eg, city) [Default City]:GALWAY Organization Name (eg, company) [Default Company Ltd]:QUAY Organizational Unit Name (eg, section) []:DOCS Common Name (eg, your name or your server's hostname) []:quay-server.example.com -``` +---- + +[id="signing-a-certificate"] +== Signing a certificate -== Sign a certificate +Use the following procedure to sign a certificate. -. Generate the server key: +.Procedure + +. Generate the server key by entering the following command: + -``` +[source,terminal] +---- $ openssl genrsa -out ssl.key 2048 -``` +---- -. Generate a signing request: +. Generate a signing request by entering the following command: + -``` +[source,terminal] +---- $ openssl req -new -key ssl.key -out ssl.csr -``` +---- . Enter the information that will be incorporated into your certificate request, including the server hostname, for example: + -``` +[source,terminal] +---- Country Name (2 letter code) [XX]:IE State or Province Name (full name) []:GALWAY Locality Name (eg, city) [Default City]:GALWAY Organization Name (eg, company) [Default Company Ltd]:QUAY Organizational Unit Name (eg, section) []:DOCS Common Name (eg, your name or your server's hostname) []:quay-server.example.com -``` +---- -. Create a configuration file `openssl.cnf`, specifying the server hostname, for example: +. Create a configuration file `openssl.cnf`, specifying the server hostname, for example: + .openssl.cnf -[source] +[source,terminal] ---- [req] req_extensions = v3_req @@ -71,9 +88,9 @@ DNS.1 = quay-server.example.com IP.1 = 192.168.1.112 ---- - . Use the configuration file to generate the certificate `ssl.cert`: + -``` +[source,terminal] +---- $ openssl x509 -req -in ssl.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out ssl.cert -days 365 -extensions v3_req -extfile openssl.cnf -``` +---- \ No newline at end of file diff --git a/modules/ssl-intro.adoc b/modules/ssl-intro.adoc index 6fe30a74a..920406c0d 100644 --- a/modules/ssl-intro.adoc +++ b/modules/ssl-intro.adoc @@ -1,13 +1,17 @@ -= Introduction to using SSL +[id="introduction-using-ssl"] += Using SSL/TLS -To configure {productname} with a -https://en.wikipedia.org/wiki/Self-signed_certificate[self-signed -certificate], you need to create a Certificate Authority (CA) and then generate the required key and certificate files. +To configure {productname} with a https://en.wikipedia.org/wiki/Self-signed_certificate[self-signed +certificate], you must create a Certificate Authority (CA) and then generate the required key and certificate files. 
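+
+Because newer {productname} releases require certificates to carry the expected hostname in a Subject Alternative Name (SAN) field rather than relying on Common Name matching, it can be worth inspecting a generated certificate before installing it. A minimal check with OpenSSL, assuming a certificate file named `ssl.cert` as produced by the procedures in this guide:
+
+[source,terminal]
+----
+$ openssl x509 -in ssl.cert -noout -text | grep -A1 "Subject Alternative Name"
+----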
+[NOTE] +==== The following examples assume you have configured the server hostname `quay-server.example.com` using DNS or another naming mechanism, such as adding an entry in your `/etc/hosts` file: -.... +[source,terminal] +---- $ cat /etc/hosts ... 192.168.1.112 quay-server.example.com -.... \ No newline at end of file +---- +==== \ No newline at end of file diff --git a/modules/standalone-deployment-backup-restore.adoc b/modules/standalone-deployment-backup-restore.adoc new file mode 100644 index 000000000..de403a72e --- /dev/null +++ b/modules/standalone-deployment-backup-restore.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="standalone-deployment-backup-restore"] += Backing up and restoring {productname} on a standalone deployment + +Use the content within this section to back up and restore {productname} in standalone deployments. \ No newline at end of file diff --git a/modules/standalone-to-operator-backup-restore.adoc b/modules/standalone-to-operator-backup-restore.adoc new file mode 100644 index 000000000..4965104df --- /dev/null +++ b/modules/standalone-to-operator-backup-restore.adoc @@ -0,0 +1,375 @@ += Migrating a standalone Quay deployment to a {productname} Operator managed deployment + +The following procedures allow you to back up a standalone {productname} deployment and migrate it to the {productname} Operator on OpenShift Container Platform. + +== Backing up a standalone deployment of {productname} + +.Procedure + +. Back up the Quay `config.yaml` of your standalone deployment: ++ +[source,terminal] +---- +$ mkdir /tmp/quay-backup +$ cp /path/to/Quay/config/directory/config.yaml /tmp/quay-backup +---- + +. Create a backup of the database that your standalone Quay deployment is using: ++ +[source,terminal] +---- +$ pg_dump -h DB_HOST -p 5432 -d QUAY_DATABASE_NAME -U QUAY_DATABASE_USER -W -O > /tmp/quay-backup/quay-database-backup.sql +---- + +. Install the link:https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html#install-linux-bundled-sudo[AWS CLI] if you do not have it already. + +. Create a `~/.aws/` directory: ++ +[source,terminal] +---- +$ mkdir ~/.aws/ +---- + +. Obtain the `access_key` and `secret_key` from the Quay `config.yaml` of your standalone deployment: ++ +[source,terminal] +---- +$ grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/config.yaml +---- ++ +Example output: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + minio-1: + - RadosGWStorage + - access_key: ########## + bucket_name: quay + hostname: 172.24.10.50 + is_secure: false + port: "9000" + secret_key: ########## + storage_path: /datastorage/registry +---- + +. Create a `~/.aws/credentials` file: ++ +[source,terminal] +---- +$ touch ~/.aws/credentials +---- + +. Store the `access_key` and `secret_key` from the Quay `config.yaml` file in the credentials file: ++ +[source,terminal] +---- +$ cat > ~/.aws/credentials << EOF +[default] +aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG +aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG +EOF +---- ++ +The credentials file now contains, for example: ++ +[source,terminal] +---- +aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG +aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG +---- ++ +[NOTE] +==== +If the `aws` CLI does not automatically collect the `access_key` and `secret_key` from the `~/.aws/credentials` file, you can configure these by running `aws configure` and manually inputting the credentials. +==== + +. 
In your `quay-backup` directory, create a `bucket_backup` directory: ++ +[source,terminal] +---- +$ mkdir /tmp/quay-backup/bucket-backup +---- + +. Back up all blobs from the S3 storage: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl --endpoint-url https://PUBLIC_S3_ENDPOINT:PORT s3://QUAY_BUCKET/ /tmp/quay-backup/bucket-backup/ +---- ++ +[NOTE] +==== +The `PUBLIC_S3_ENDPOINT` can be read from the Quay `config.yaml` file under `hostname` in the `DISTRIBUTED_STORAGE_CONFIG`. If the endpoint is insecure, use `http` instead of `https` in the endpoint URL. +==== + +Up to this point, you should have a complete backup of all Quay data, blobs, the database, and the `config.yaml` file stored locally. In the following section, you will migrate the standalone deployment backup to {productname} on OpenShift Container Platform. + +== Using backed up standalone content to migrate to OpenShift Container Platform + + +.Prerequisites + +* Your standalone {productname} data, blobs, database, and `config.yaml` have been backed up. +* {productname} is deployed on OpenShift Container Platform using the Quay Operator. +* A `QuayRegistry` object with all components set to `managed`. + +.Procedure + +[NOTE] +==== +The procedure in this document uses the following namespace: `quay-enterprise`. +==== + +. Scale down the {productname} Operator: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment quay-operator.v3.6.2 -n openshift-operators +---- + +. Scale down the application and mirror deployments: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment QUAY_MAIN_APP_DEPLOYMENT QUAY_MIRROR_DEPLOYMENT +---- + +. Copy the database SQL backup to the Quay PostgreSQL database instance: ++ +[source,terminal] +---- +$ oc cp /tmp/user/quay-backup/quay-database-backup.sql quay-enterprise/quayregistry-quay-database-54956cdd54-p7b2w:/var/lib/pgsql/data/userdata +---- + + +. Obtain the database password from the Operator-created `config.yaml` file: ++ +[source,terminal] +---- +$ oc get deployment quay-quay-app -o json | jq '.spec.template.spec.volumes[].projected.sources' | grep -i config-secret +---- ++ +Example output: ++ +[source,yaml] +---- + "name": "QUAY_CONFIG_SECRET_NAME" +---- ++ +[source,terminal] +---- +$ oc get secret quay-quay-config-secret-9t77hb84tb -o json | jq '.data."config.yaml"' | cut -d '"' -f2 | base64 -d -w0 > /tmp/quay-backup/operator-quay-config-yaml-backup.yaml +---- ++ +[source,terminal] +---- +$ cat /tmp/quay-backup/operator-quay-config-yaml-backup.yaml | grep -i DB_URI +---- ++ +Example output: ++ +---- +postgresql://QUAY_DATABASE_OWNER:PASSWORD@DATABASE_HOST/QUAY_DATABASE_NAME +---- + +. Execute a shell inside the database pod: ++ +[source,terminal] +---- +# oc exec -it quay-postgresql-database-pod -- /bin/bash +---- + +. Enter psql: ++ +[source,terminal] +---- +bash-4.4$ psql +---- + +. Drop the database: ++ +[source,terminal] +---- +postgres=# DROP DATABASE "example-restore-registry-quay-database"; +---- ++ +Example output: ++ +---- +DROP DATABASE +---- + +. Create a new database and set the owner to the same name: ++ +[source,terminal] +---- +postgres=# CREATE DATABASE "example-restore-registry-quay-database" OWNER "example-restore-registry-quay-database"; +---- ++ +Example output: ++ +---- +CREATE DATABASE +---- + +. 
Connect to the database: ++ +[source,terminal] +---- +postgres=# \c "example-restore-registry-quay-database"; +---- ++ +Example output: ++ +[source,terminal] +---- +You are now connected to database "example-restore-registry-quay-database" as user "postgres". +---- + +. Create a `pg_trgm` extension in your Quay database: ++ +[source,terminal] +---- +example-restore-registry-quay-database=# create extension pg_trgm; +---- ++ +Example output: ++ +[source,terminal] +---- +CREATE EXTENSION +---- + +. Exit the postgres CLI to re-enter bash-4.4: ++ +[source,terminal] +---- +\q +---- + +. Import the database backup into your PostgreSQL deployment, entering the database password when prompted: ++ +[source,terminal] +---- +bash-4.4$ psql -h localhost -d "QUAY_DATABASE_NAME" -U QUAY_DATABASE_OWNER -W < /var/lib/pgsql/data/userdata/quay-database-backup.sql +---- ++ +Example output: ++ +---- +SET +SET +SET +SET +SET +---- + +. Exit bash mode: ++ +[source,terminal] +---- +bash-4.4$ exit +---- + +. Create a new configuration bundle for the {productname} Operator. ++ +[source,terminal] +---- +$ touch config-bundle.yaml +---- + +. In your new `config-bundle.yaml`, include all of the information that the registry requires, such as LDAP configuration, keys, and other modifications that your old registry had. Run the following command to move the `secret_key` to your `config-bundle.yaml`: ++ +[source,terminal] +---- +$ cat /tmp/quay-backup/config.yaml | grep SECRET_KEY > /tmp/quay-backup/config-bundle.yaml +---- ++ +[NOTE] +==== +You must manually copy all of the LDAP, OIDC, and other information, and add it to the `/tmp/quay-backup/config-bundle.yaml` file. +==== + +. Create a configuration bundle secret inside of your OpenShift cluster: ++ +[source,terminal] +---- +$ oc create secret generic new-custom-config-bundle --from-file=config.yaml=/tmp/quay-backup/config-bundle.yaml +---- + +. Scale up the Quay pods: ++ +[source,terminal] +---- +$ oc scale --replicas=1 deployment quayregistry-quay-app +deployment.apps/quayregistry-quay-app scaled +---- + +. Scale up the mirror pods: ++ +[source,terminal] +---- +$ oc scale --replicas=1 deployment quayregistry-quay-mirror +deployment.apps/quayregistry-quay-mirror scaled +---- + +. Patch the `QuayRegistry` CRD so that it contains the reference to the new custom configuration bundle: ++ +[source,terminal] +---- +$ oc patch quayregistry QUAY_REGISTRY_NAME --type=merge -p '{"spec":{"configBundleSecret":"new-custom-config-bundle"}}' +---- ++ +[NOTE] +==== +If Quay returns a `500` internal server error, you might have to update the `location` of your `DISTRIBUTED_STORAGE_CONFIG` to `default`. +==== + +. Create a new AWS `credentials.yaml` in your `~/.aws/` directory and include the `access_key` and `secret_key` from the Operator-created `config.yaml` file: ++ +[source,terminal] +---- +$ touch credentials.yaml +---- ++ +[source,terminal] +---- +$ grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/operator-quay-config-yaml-backup.yaml +---- ++ +[source,terminal] +---- +$ cat > ~/.aws/credentials << EOF +[default] +aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG +aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG +EOF +---- ++ +[NOTE] +==== +If the `aws` CLI does not automatically collect the `access_key` and `secret_key` from the `~/.aws/credentials` file, you can configure these by running `aws configure` and manually inputting the credentials. +==== + +. Record NooBaa's publicly available endpoint: ++ +[source,terminal] +---- +$ oc get route s3 -n openshift-storage -o yaml -o jsonpath="{.spec.host}{'\n'}" +---- + +. 
Sync the backup data to the NooBaa backend storage: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl --endpoint-url https://NOOBAA_PUBLIC_S3_ROUTE /tmp/quay-backup/bucket-backup/ s3://QUAY_DATASTORE_BUCKET_NAME +---- + +. Scale the Operator back up to 1 pod: ++ +[source,terminal] +---- +$ oc scale --replicas=1 deployment quay-operator.v3.6.4 -n openshift-operators +---- + +The Operator will use the custom configuration bundle provided and will reconcile all secrets and deployments. Your new Quay deployment on OpenShift Container Platform should contain all of the information that the old deployment had. All images should be pullable. diff --git a/modules/subscription-intro.adoc b/modules/subscription-intro.adoc index 02f69559d..4571a2daa 100644 --- a/modules/subscription-intro.adoc +++ b/modules/subscription-intro.adoc @@ -1,26 +1,27 @@ -[[subscription-intro]] -= {productname} subscription information +:_content-type: CONCEPT +[id="subscription-intro"] += {productname} subscription information -{productname} is available with Standard or Premium support, and subscriptions are based on deployments. +{productname} is available with Standard or Premium support, and subscriptions are based on deployments. [NOTE] ==== -Deployment means an installation of a single {productname} registry using a shared data backend. +Deployment means an installation of a single {productname} registry using a shared data backend. ==== -With a {productname} subscription: +With a {productname} subscription, the following options are available: -* There is no limit on the number of pods (Quay, Clair, Builder, etc.) you can deploy. -* Quay pods can run in multiple data centers or availability zones. -* Storage and database backends can be deployed across multiple data centers or availability zones, but only as a single, shared storage backend and single, shared database backend. -* Quay can manage content for an unlimited number of clusters or standalone servers. -* Clients can access to the Quay deployment irrespective of their physical location. -* You can deploy Quay on OpenShift infrastructure nodes to minimize subscription requirements. -* You can run the Container Security Operator (CSO) and the Quay Bridge Operator (QBO) on your OpenShift clusters at no additional cost. +* There is no limit on the number of pods, such as Quay, Clair, Builder, and so on, that you can deploy. +* {productname} pods can run in multiple data centers or availability zones. +* Storage and database backends can be deployed across multiple data centers or availability zones, but only as a single, shared storage backend and single, shared database backend. +* {productname} can manage content for an unlimited number of clusters or standalone servers. +* Clients can access the {productname} deployment regardless of their physical location. +* You can deploy {productname} on {ocp} infrastructure nodes to minimize subscription requirements. +* You can run the Container Security Operator (CSO) and the Quay Bridge Operator (QBO) on your {ocp} clusters at no additional cost. [NOTE] ==== -{productname} geo-replication requires a subscription for each storage replication. The database, however, is shared. -==== +{productname} geo-replication requires a subscription for each storage replication. The database, however, is shared. +==== -For more information on purchasing a {productname} subscription, see link:https://www.redhat.com/en/technologies/cloud-computing/quay[{productname}]. 
+For more information about purchasing a {productname} subscription, see link:https://www.redhat.com/en/technologies/cloud-computing/quay[{productname}]. diff --git a/modules/tenancy-model.adoc b/modules/tenancy-model.adoc index 7f6c473bd..ef699dd23 100644 --- a/modules/tenancy-model.adoc +++ b/modules/tenancy-model.adoc @@ -3,10 +3,10 @@ image:178_Quay_architecture_0821_tenancy_model.png[Quay tenancy model] -* **Organizations** provide a way of sharing repositories under a common namespace that does not belong to a single user, but rather to many users in a shared setting (such as a company). -* **Teams** provide a way for an organization to delegate permissions (both global and on specific repositories) to sets or groups of users -* **Users** can log in to a registry through the Quay web UI or a client (such as `podman login`). Each users automatically gets a user namespace, for example, `quay-server.example.com/user/` -* **Super users** have enhanced access and privileges via the Super User Admin Panel in the user interface and through Super User API calls that are not visible or accessible to normal users +* **Organizations** provide a way of sharing repositories under a common namespace which does not belong to a single user, but rather to many users in a shared setting (such as a company). +* **Teams** provide a way for an organization to delegate permissions (both global and on specific repositories) to sets or groups of users. +* **Users** can log in to a registry through the {productname} web UI or a client (such as `podman login`). Each user automatically gets a user namespace, for example, `quay-server.example.com/user/`. +* **Super users** have enhanced access and privileges via the Super User Admin Panel in the user interface and through Super User API calls that are not visible or accessible to normal users. * **Robot accounts** provide automated access to repositories for non-human users such as pipeline tools and are similar in nature to OpenShift service accounts. Permissions can be granted to a robot account in a repository by adding that account like any other user or team. diff --git a/modules/testing-3-800.adoc b/modules/testing-3-800.adoc new file mode 100644 index 000000000..7153a3b69 --- /dev/null +++ b/modules/testing-3-800.adoc @@ -0,0 +1,910 @@ +[[testing-3-800]] +== Testing 3.8.0 features + +The following sections in this guide explain how to enable new features and test that they are working. + +[[enabling-ipv6-dual-stack]] +=== Enabling and testing the IPv6 and dual-stack protocol family on standalone {productname} deployments + +Your {productname} deployment can now be served in locations that only support IPv6, such as Telco and Edge environments. Support is also offered for dual-stack networking so your {productname} deployment can listen on IPv4 and IPv6 simultaneously. + +[[enabling-ipv6]] +==== Enabling and testing IPv6 + +Use the following procedure to enable IPv6 on your standalone {productname} deployment. + +.Prerequisites + +* You have updated {productname} to 3.8. +* Your host and container software platform (Docker, Podman) must be configured to support IPv6. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `IPv6`, for example: ++ +[source,yaml] +---- +--- +FEATURE_GOOGLE_LOGIN: false +FEATURE_INVITE_ONLY_USER_CREATION: false +FEATURE_LISTEN_IP_VERSION: IPv6 +FEATURE_MAILING: false +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false +--- +---- + +. 
Start, or restart, your {productname} deployment. + +. Check that your deployment is listening on IPv6. ++ +.. For a standalone deployment, enter the following command: ++ +[source,terminal] +---- +$ curl /health/instance +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- ++ +.. For an Operator-based deployment, enter the following command: ++ +[source,terminal] +---- +$ curl /health/instance +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- + +===== Expected Results + +After enabling IPv6 in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured to use IPv6 and is not hindered by the xref:ipv6-limitations[current limitations]. + +[WARNING] +==== +If your environment is configured for IPv4, but the `FEATURE_LISTEN_IP_VERSION` configuration field is set to `IPv6`, {productname} will fail to deploy. +==== + +You can use the following procedure to test that your {productname} deployment can push and pull images in an IPv6 environment. + +.Procedure + +. Pull a sample image from an external registry: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your {productname} registry: ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +. Test access to the image from the CLI by deleting your local copy of the image: ++ +[source,terminal] +---- +$ podman rmi quay-server.example.com/quayadmin/busybox:test +---- + +. Pull the image from your {productname} registry: ++ +[source,terminal] +---- +$ podman pull --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +[[enabling-dual-stack]] +==== Enabling and testing dual-stack + +.Prerequisites + +* You have updated {productname} to 3.8. +* Your host and container software platform (Docker, Podman) must be configured to support IPv6. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `dual-stack`, for example: ++ +[source,yaml] +---- +--- +FEATURE_GOOGLE_LOGIN: false +FEATURE_INVITE_ONLY_USER_CREATION: false +FEATURE_LISTEN_IP_VERSION: dual-stack +FEATURE_MAILING: false +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false +--- +---- + +. Start, or restart, your {productname} deployment. + +. Check that your deployment is listening on both address families by entering the following command: +.. For IPv4, enter the following command: ++ +[source,terminal] +---- +$ curl --ipv4 +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- +.. For IPv6, enter the following command: ++ +[source,terminal] +---- +$ curl --ipv6 +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- + +===== Expected Results + +After enabling dual-stack in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured for dual-stack. 
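+
+Note that for the `curl --ipv4` and `curl --ipv6` checks above to exercise both families, the registry hostname must resolve to both an IPv4 and an IPv6 address. A minimal `/etc/hosts` sketch, consistent with the hostname used elsewhere in this guide; both addresses are placeholders for illustration:
+
+[source,terminal]
+----
+$ cat /etc/hosts
+...
+192.168.1.112 quay-server.example.com
+2001:db8::1112 quay-server.example.com
+----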
+ +You can use the following procedure to test that your {productname} deployment can push and pull images in a dual-stack environment. + +.Procedure + +. Pull a sample image from an external registry: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your {productname} registry: ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +. Test access to the image from the CLI by deleting your local copy of the image: ++ +[source,terminal] +---- +$ podman rmi quay-server.example.com/quayadmin/busybox:test +---- + +. Pull the image from your {productname} registry: ++ +[source,terminal] +---- +$ podman pull --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +[[enabling-ldap-super-users]] +=== Enabling LDAP superusers for {productname} + +The `LDAP_SUPERUSER_FILTER` configuration field is now available. With this field configured, {productname} administrators can configure Lightweight Directory Access Protocol (LDAP) users as superusers if {productname} uses LDAP as its authentication provider. + +Use the following procedure to enable LDAP superusers on your {productname} deployment. + +.Prerequisites + +* Your {productname} deployment uses LDAP as its authentication provider. +* You have configured the `LDAP_USER_FILTER` field. + +.Procedure + +. In your deployment's `config.yaml` file, add the `LDAP_SUPERUSER_FILTER` parameter and add the group of users you want configured as superusers, for example, `root`: ++ +[source,yaml] +---- +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o= + - dc= + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) +LDAP_SUPERUSER_FILTER: (memberOf=cn=root,ou=Admin,o=,dc=,dc=com) +LDAP_USER_RDN: + - ou= + - o= + - dc= + - dc=com +---- + +. Start, or restart, your {productname} deployment. + +===== Expected Results + +After enabling the `LDAP_SUPERUSER_FILTER` feature, your LDAP {productname} users have superuser privileges. The following options are available to superusers: + +* Manage users +* Manage organizations +* Manage service keys +* View the change log +* Query the usage logs +* Create globally visible user messages + +Use the following procedure to test that your {productname} LDAP users have been given superuser privileges. + +.Prerequisites + +* You have configured the `LDAP_SUPERUSER_FILTER` field. + +.Procedure + +. Log in to your {productname} registry as the configured LDAP superuser. + +. Access the *Super User Admin Panel* by clicking on your user name or avatar in the top right-hand corner of the UI. If you have been properly configured as a superuser, an extra item is presented in the drop-down list called *Super User Admin Panel*. + +. On the *{productname} Management* page, click *Globally visible user messages* on the navigation pane. + +. Click *Create Message* to reveal a drop-down menu containing *Normal*, *Warning*, and *Error* message types. + +. Enter a message by selecting *Click to set message*, then click *Create Message*. + +Now, when users log in to the {productname} registry, they are presented with a global message. 
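+
+Optionally, you can also verify superuser status from the command line, because the superuser API endpoints respond successfully only for superuser accounts. A minimal sketch, assuming you have already created an OAuth access token for the LDAP superuser; the token placeholder is illustrative:
+
+[source,terminal]
+----
+$ curl -k -H "Authorization: Bearer <access_token>" https://quay-server.example.com/api/v1/superuser/users/
+----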
+
+
+[[enabling-ldap-restricted-users]]
+=== Enabling LDAP restricted users for {productname}
+
+The `LDAP_RESTRICTED_USER_FILTER` configuration field is now available. This configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, it allows {productname} administrators to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider.
+
+Use the following procedure to enable LDAP restricted users on your {productname} deployment.
+
+.Prerequisites
+
+* Your {productname} deployment uses LDAP as its authentication provider.
+* You have configured the `LDAP_USER_FILTER` field.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `LDAP_RESTRICTED_USER_FILTER` parameter and specify the group of restricted users, for example, `members`:
++
+[source,yaml]
+----
+LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com
+LDAP_ADMIN_PASSWD: ABC123
+LDAP_ALLOW_INSECURE_FALLBACK: false
+LDAP_BASE_DN:
+ - o=
+ - dc=
+ - dc=com
+LDAP_EMAIL_ATTR: mail
+LDAP_UID_ATTR: uid
+LDAP_URI: ldap://.com
+LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com)
+LDAP_RESTRICTED_USER_FILTER: (=)
+LDAP_USER_RDN:
+ - ou=
+ - o=
+ - dc=
+ - dc=com
+----
+
+. Start, or restart, your {productname} deployment.
+
+
+[[enabling-superusers-full-access]]
+=== Enabling and testing `FEATURE_SUPERUSERS_FULL_ACCESS`
+
+The `FEATURE_SUPERUSERS_FULL_ACCESS` feature is now available. This configuration field grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for.
+
+[NOTE]
+====
+* This feature is only available on the beta of the new UI. When enabled, it shows all organizations that the superuser has access to. To enable the beta of the new UI, see xref:enabling-ui-v2[`FEATURE_UI_V2`].
+* When this field is enabled, the superuser cannot view the image repository of every organization at once. This is a known limitation and will be fixed in a future version of {productname}. As a temporary workaround, the superuser can view image repositories by navigating to them from the *Organizations* page.
+====
+
+
+Use the following procedure to enable the `FEATURE_SUPERUSERS_FULL_ACCESS` feature.
+
+.Prerequisites
+
+* You have defined the `SUPER_USERS` configuration field in your `config.yaml` file.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `FEATURE_SUPERUSERS_FULL_ACCESS` parameter and set it to `true`, for example:
++
+[source,yaml]
+----
+---
+SUPER_USERS:
+- quayadmin
+FEATURE_SUPERUSERS_FULL_ACCESS: true
+---
+----
+
+. Start, or restart, your {productname} deployment.
+
+==== Expected results
+
+With this feature enabled, your superusers should be able to read, write, and delete content from other repositories in namespaces that they do not own. To ensure that this feature is working as intended, use the following procedure.
+
+.Prerequisites
+
+* You have set the `FEATURE_SUPERUSERS_FULL_ACCESS` field to `true` in your `config.yaml` file.
+
+.Procedure
+
+. Open your {productname} registry and click *Create new account*.
+
+. Create a new user, for example, `user1`.
+
+. Log in as `user1`.
+
+. Click *user1* under *Users and Organizations*.
+
+. Create a new repository by clicking *creating a new repository*.
+
+. Enter a repository name, for example, `testrepo`, then click *Create private repository*.
+
+. Use the CLI to log in to the registry as `user1`:
++
+[source,terminal]
+----
+$ podman login --tls-verify=false quay-server.example.com
+----
++
+Example output:
++
+[source,terminal]
+----
+Username: user1
+Password:
+Login Succeeded!
+----
+
+. Pull a sample image by entering the following command:
++
+[source,terminal]
+----
+$ podman pull busybox
+----
+
+. Tag the image:
++
+[source,terminal]
+----
+$ podman tag docker.io/library/busybox quay-server.example.com/user1/testrepo/busybox:test
+----
+
+. Push the image to your {productname} registry:
++
+[source,terminal]
+----
+$ sudo podman push --tls-verify=false quay-server.example.com/user1/testrepo/busybox:test
+----
+
+. Ensure that you have successfully pushed the image to your repository by navigating to `www.quay-server.example.com/repository/user1/testrepo/busybox` and clicking *Tags* in the navigation pane.
+
+. Sign out of `user1` by clicking *user1* -> *Sign out all sessions*.
+
+. Log out of the registry using the CLI:
++
+[source,terminal]
+----
+$ podman logout quay-server.example.com
+----
++
+Example output:
++
+[source,terminal]
+----
+Removed login credentials for quay-server.example.com
+----
+
+. On the UI, log in as the designated superuser with full access privileges, for example, `quayadmin`.
+
+. On the CLI, log in as the designated superuser with full access privileges, for example, `quayadmin`:
++
+[source,terminal]
+----
+$ podman login quay-server.example.com
+----
++
+Example output:
++
+[source,terminal]
+----
+Username: quayadmin
+Password:
+Login Succeeded!
+----
+
+. Now, you can pull the `busybox` image from the `user1` repository by entering the following command:
++
+[source,terminal]
+----
+$ podman pull --tls-verify=false quay-server.example.com/user1/testrepo/busybox:test
+----
++
+Example output:
++
+[source,terminal]
+----
+Trying to pull quay-server.example.com/user1/testrepo/busybox:test...
+Getting image source signatures
+Copying blob 29c7fae3c03c skipped: already exists
+Copying config 2bd2971487 done
+Writing manifest to image destination
+Storing signatures
+2bd29714875d9206777f9e8876033cbcd58edd14f2c0f1203435296b3f31c5f7
+----
+
+. You can also push images to the `user1` repository by entering the following commands:
++
+[source,terminal]
+----
+$ podman tag docker.io/library/busybox quay-server.example.com/user1/testrepo/busybox:test1
+----
++
+[source,terminal]
+----
+$ podman push quay-server.example.com/user1/testrepo/busybox:test1
+----
++
+Example output:
++
+[source,terminal]
+----
+Getting image source signatures
+Copying blob 29c7fae3c03c skipped: already exists
+Copying config 2bd2971487 done
+Writing manifest to image destination
+Storing signatures
+----
+
+. Additionally, you can delete images from the `user1` repository by using the API endpoint for the tagged image:
++
+[source,terminal]
+----
+$ curl -X DELETE -H "Authorization: Bearer " http://quay-server.example.com/api/v1/repository/user1/testrepo/tag/test1
+----
++
+[NOTE]
+====
+For more information about obtaining OAuth tokens, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/red_hat_quay_api_guide/using_the_red_hat_quay_api#create_oauth_access_token[Create OAuth access token].
+====
+
+[[enabling-feature-restricted-users]]
+=== Enabling and testing `FEATURE_RESTRICTED_USERS`
+
+With this feature enabled, normal users are unable to create organizations.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `FEATURE_RESTRICTED_USERS` parameter and set it to `true`, for example:
++
+[source,yaml]
+----
+---
+FEATURE_RESTRICTED_USERS: true
+---
+----
+
+. Start, or restart, your {productname} deployment.
+
+==== Expected results
+
+With this feature enabled, normal users cannot create organizations. To ensure that this feature is working as intended, use the following procedure.
+
+.Prerequisites
+
+* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`.
+* Your {productname} registry has a sample tag.
+
+.Procedure
+
+. Log in as a normal {productname} user, for example, `user1`.
+
+. Click *Create New Organization* on the {productname} UI.
+
+. In the *Organization Name* box, enter a name, for example, `testorg`.
+
+. Click *Create Organization*. This results in an `Unauthorized` message.
+
+[[enabling-restricted-users-read-only]]
+=== Enabling and testing `RESTRICTED_USER_READ_ONLY`
+
+When `FEATURE_RESTRICTED_USERS` is set to `true`, `RESTRICTED_USER_READ_ONLY` restricts users to read-only operations.
+
+Use the following procedure to enable `RESTRICTED_USER_READ_ONLY`.
+
+.Prerequisites
+
+* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `RESTRICTED_USER_READ_ONLY` parameter and set it to `true`:
++
+[source,yaml]
+----
+FEATURE_RESTRICTED_USERS: true
+RESTRICTED_USER_READ_ONLY: true
+----
+
+. Start, or restart, your {productname} deployment.
+
+==== Expected results
+
+With this feature enabled, users can only perform read-only operations. Use the following procedure to ensure that this feature is working as intended.
+
+.Prerequisites
+
+* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`.
+* `RESTRICTED_USER_READ_ONLY` is set to `true` in your `config.yaml`.
+* Your {productname} registry has a sample tag.
+
+.Procedure
+
+. Log in to your {productname} registry as a normal user, for example, `user1`.
+
+. On the {productname} UI, click *Explore*.
+
+. Select a repository, for example, *quayadmin/busybox*.
+
+. Select *Tags* on the navigation pane.
+
+. Pull a sample tag from the repository, for example:
++
+[source,terminal]
+----
+$ podman pull quay-server.example.com/quayadmin/busybox:test
+----
++
+Example output:
++
+[source,terminal]
+----
+Trying to pull quay-server.example.com/quayadmin/busybox:test...
+Getting image source signatures
+Copying blob 29c7fae3c03c skipped: already exists
+Copying config 2bd2971487 done
+Writing manifest to image destination
+Storing signatures
+2bd29714875d9206777f9e8876033cbcd58edd14f2c0f1203435296b3f31c5f7
+----
+
+Next, try to push an image. This should result in an `unauthorized` error.
+
+. Tag an image by entering the following command:
++
+[source,terminal]
+----
+$ podman tag docker.io/library/busybox quay-server.example.com/user1/busybox:test
+----
+
+. Push the image by entering the following command:
++
+[source,terminal]
+----
+$ podman push quay-server.example.com/user1/busybox:test
+----
++
+Example output:
++
+[source,terminal]
+----
+Getting image source signatures
+Copying blob 29c7fae3c03c skipped: already exists
+Copying config 2bd2971487 done
+Writing manifest to image destination
+Error: writing manifest: uploading manifest test to quay-server.example.com/user1/busybox: unauthorized: access to the requested resource is not authorized
+----
+
+Next, try to create an organization by using the {productname} UI:
+
+. Log in to your {productname} registry as the restricted user, for example, `user1`.
+
+. On the UI, click *Create New Organization*.
+
+If properly configured, `user1` is unable to create a new organization.
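+
+You can also confirm the restriction at the API level. The following is a minimal sketch that assumes a hypothetical OAuth access token created for `user1` and the example hostname used elsewhere in this guide; the organization-creation endpoint is the same one shown in the API guide:
+
+[source,terminal]
+----
+$ curl -X POST -k --header 'Content-Type: application/json' -H "Authorization: Bearer <user1_token>" https://quay-server.example.com/api/v1/organization/ --data '{"name": "testorg", "email": "testorg@example.com"}'
+----
+
+Because `user1` is restricted, the request should be rejected with an `Unauthorized` response rather than returning `"Created"`.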
+
+
+[[enabling-restricted-users-whitelist]]
+=== Enabling and testing `RESTRICTED_USERS_WHITELIST`
+
+When this feature is set, specified users are excluded from the `FEATURE_RESTRICTED_USERS` and `RESTRICTED_USER_READ_ONLY` configurations. Use the following procedure to exclude users from the `FEATURE_RESTRICTED_USERS` and `RESTRICTED_USER_READ_ONLY` settings so that they have `read` and `write` privileges.
+
+.Prerequisites
+
+* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`.
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `RESTRICTED_USERS_WHITELIST` parameter and a user, for example, `user2`:
++
+[source,yaml]
+----
+FEATURE_RESTRICTED_USERS: true
+RESTRICTED_USERS_WHITELIST:
+ - user2
+----
+
+. Start, or restart, your {productname} deployment.
+
+==== Expected results
+
+With this feature enabled, whitelisted users can create organizations, and read and write content to and from the repository, even if `FEATURE_RESTRICTED_USERS` is set to `true`. To ensure that this feature is working as intended, use the following procedure.
+
+.Procedure
+
+. Log in to your {productname} registry as the whitelisted user, for example, `user2`.
+
+. On the UI, click *Create New Organization*.
+
+. Enter an organization name, for example, `testorg`.
+
+. Click *Create Organization*. If successful, you are redirected to the organization's page.
+
+Next, as the whitelisted user, try to push an image. This should result in a successfully pushed image.
+
+. Tag an image by entering the following command:
++
+[source,terminal]
+----
+$ podman tag docker.io/library/busybox quay-server.example.com/user2/busybox:test
+----
+
+. Push the image by entering the following command:
++
+[source,terminal]
+----
+$ podman push quay-server.example.com/user2/busybox:test
+----
++
+Example output:
++
+[source,terminal]
+----
+Getting image source signatures
+Copying blob 29c7fae3c03c skipped: already exists
+Copying config 2bd2971487 done
+Writing manifest to image destination
+Storing signatures
+----
+
+[[enabling-ui-v2]]
+=== Enabling and testing `FEATURE_UI_V2`
+
+With this feature enabled, you can toggle between the current version of the user interface and the new version of the user interface.
+
+[IMPORTANT]
+====
+* This UI is currently in beta and subject to change. In its current state, users can only create, view, and delete organizations, repositories, and image tags.
+* When running {productname} in the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI.
+* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. In the new UI, {productname} uses the standard definition of megabyte (MB) to report image manifest sizes.
+====
+
+.Procedure
+
+. In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example:
++
+[source,yaml]
+----
+---
+FEATURE_TEAM_SYNCING: false
+FEATURE_UI_V2: true
+FEATURE_USER_CREATION: true
+---
+----
+
+. Log in to your {productname} deployment.
+
+. In the navigation pane of your {productname} deployment, you are given the option to toggle between the *Current UI* and the *New UI*. Click the toggle button to set it to the new UI, and then click *Use Beta Environment*, for example:
++
+image:38-ui-toggle.png[{productname} 3.8 UI toggle]
+
+==== Creating a new organization in the {productname} 3.8 beta UI
+
+.Prerequisites
+
+* You have toggled your {productname} deployment to use the 3.8 beta UI.
+
+Use the following procedure to create an organization using the {productname} 3.8 beta UI.
+
+.Procedure
+
+. Click *Organizations* in the navigation pane.
+
+. Click *Create Organization*.
+
+. Enter an *Organization Name*, for example, `testorg`.
+
+. Click *Create*.
+
+Now, your example organization should appear on the *Organizations* page.
+
+==== Deleting an organization using the {productname} 3.8 beta UI
+
+Use the following procedure to delete an organization using the {productname} 3.8 beta UI.
+
+.Procedure
+
+. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`.
+
+. Click the *More Actions* drop-down menu.
+
+. Click *Delete*.
++
+[NOTE]
+====
+On the *Delete* page, there is a *Search* input box. With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm said organization is marked for deletion.
+====
+
+. Confirm that you want to permanently delete the organization by typing *confirm* in the box.
+
+. Click *Delete*.
+
+After deletion, you are returned to the *Organizations* page.
+
+[NOTE]
+====
+You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*.
+====
+
+==== Creating a new repository using the {productname} 3.8 beta UI
+
+Use the following procedure to create a repository using the {productname} 3.8 beta UI.
+
+.Procedure
+
+. Click *Repositories* on the navigation pane.
+
+. Click *Create Repository*.
+
+. Select a namespace, for example, *quayadmin*, and then enter a *Repository name*, for example, `testrepo`.
+
+. Click *Create*.
+
+Now, your example repository should appear on the *Repositories* page.
+
+==== Deleting a repository using the {productname} 3.8 beta UI
+
+.Prerequisites
+
+* You have created a repository.
+
+.Procedure
+
+. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the repository you want to delete, for example, `quayadmin/busybox`.
+
+. Click the *More Actions* drop-down menu.
+
+. Click *Delete*.
++
+[NOTE]
+====
+If desired, you could click *Make Public* or *Make Private*.
+====
+
+. Type *confirm* in the box, and then click *Delete*.
+
+. After deletion, you are returned to the *Repositories* page.
+
+==== Pushing an image using the {productname} 3.8 beta UI
+
+Use the following procedure to push an image to your registry and verify it using the {productname} 3.8 beta UI.
+
+.Procedure
+
+. Pull a sample image from an external registry:
++
+[source,terminal]
+----
+$ podman pull busybox
+----
+
+. Tag the image:
++
+[source,terminal]
+----
+$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test
+----
+
+. Push the image to your {productname} registry:
++
+[source,terminal]
+----
+$ podman push quay-server.example.com/quayadmin/busybox:test
+----
+
+. Navigate to the *Repositories* page on the {productname} UI and ensure that your image has been properly pushed.
+
+. You can check the security details by selecting your image tag, and then navigating to the *Security Report* page.
+
+==== Deleting an image using the {productname} 3.8 beta UI
+
+Use the following procedure to delete an image using the {productname} 3.8 beta UI.
+
+.Prerequisites
+
+* You have pushed an image to your {productname} registry.
+
+.Procedure
+
+. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the repository that contains the image you want to delete, for example, `quayadmin/busybox`.
+
+. Click the *More Actions* drop-down menu.
+
+. Click *Delete*.
++
+[NOTE]
+====
+If desired, you could click *Make Public* or *Make Private*.
+====
+
+. Type *confirm* in the box, and then click *Delete*.
+
+. After deletion, you are returned to the *Repositories* page.
+
+=== Enabling the {productname} legacy UI
+
+. In the navigation pane of your {productname} deployment, you are given the option to toggle between the *Current UI* and the *New UI*. Click the toggle button to set it to *Current UI*.
++
+image:38-ui-toggle.png[{productname} 3.8 UI toggle]
+
+[[leveraging-storage-quota-limits]]
+
+////
+=== Leveraging storage quota limits in proxy organizations
+
+With {productname} 3.8, the proxy cache feature has been enhanced with an auto-pruning feature for tagged images. The auto-pruning of image tags is only available when a proxied namespace has quota limitations configured. Currently, if an image size is greater than the quota for an organization, the image is skipped from being uploaded until an administrator creates the necessary space. Now, when an image is pushed that exceeds the allotted space, the auto-pruning enhancement marks the least recently used tags for deletion. As a result, the new image tag is stored, while the least recently used image tag is marked for deletion.
+
+[IMPORTANT]
+====
+* As part of the auto-pruning feature, the tags that are marked for deletion are eventually garbage collected by the garbage collector (gc) worker process. As a result, the quota size restriction is not fully enforced during this period.
+* Currently, the namespace quota size computation does not take into account the size of child manifests. This is a known issue and will be fixed in a future version of {productname}.
+====
+
+==== Testing the storage quota limits feature in proxy organizations
+
+Use the following procedure to test the auto-pruning feature of an organization with proxy cache and storage quota limitations enabled.
+
+.Prerequisites
+
+* Your organization is configured to serve as a proxy organization. The following example proxies from quay.io.
+
+* `FEATURE_PROXY_CACHE` is set to `true` in your `config.yaml` file.
+
+* `FEATURE_QUOTA_MANAGEMENT` is set to `true` in your `config.yaml` file.
+
+* Your organization is configured with a quota limit, for example, `150 MB`.
+
+.Procedure
+
+. Pull an image to your repository from your proxy organization, for example:
++
+[source,terminal]
+----
+$ podman pull quay-server.example.com/proxytest/projectquay/clair:4.2.3
+----
+
+. Depending on the space left in your repository, you might need to pull additional images from your proxy organization, for example:
++
+[source,terminal]
+----
+$ podman pull quay-server.example.com/proxytest/projectquay/clair:4.1.5
+----
+
+. In the {productname} registry UI, click the name of your repository.
+
+. Click *Tags* in the navigation pane and ensure that `clair:4.2.3` and `clair:4.1.5` are tagged.
+
+. Pull the last image that will result in your repository exceeding the allotted quota, for example:
++
+[source,terminal]
+----
+$ podman pull quay-server.example.com/proxytest/projectquay/clair:4.1.4
+----
+
+. Refresh the *Tags* page of your {productname} registry. The first image that you pulled, for example, `clair:4.2.3`, should have been auto-pruned. The *Tags* page should now show `clair:4.1.5` and `clair:4.1.4`.
+////
\ No newline at end of file
diff --git a/modules/testing-clair.adoc b/modules/testing-clair.adoc
new file mode 100644
index 000000000..360ef3939
--- /dev/null
+++ b/modules/testing-clair.adoc
@@ -0,0 +1,61 @@
+:_content-type: CONCEPT
+[id="testing-clair"]
+= Testing Clair
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+////
+
+Currently, there are two methods for testing Clair independently of a {productname} subscription:
+
+* In a local development environment
+* In a distributed deployment
+
+[IMPORTANT]
+====
+Testing Clair without a {productname} subscription is not officially supported. The procedures referenced here, and subsequent updates to them, are maintained by upstream contributors and developers. For more information, see link:https://quay.github.io/clair/howto/getting_started.html[Getting Started With ClairV4].
+
+For official Clair documentation, see. . .
+====
+
+[id="testing-clair-local-development-environment"]
+== Testing Clair in a local development environment
+
+The simplest way to run Clair for test purposes is to use the local development environment. The local development environment can be used to test and develop Clair's integration with {productname}. Documentation for this procedure can be found on the open source Clair project at link:https://quay.github.io/clair/howto/testing.html[Testing ClairV4].
+
+[id="clair-modes"]
+== Testing Clair in a distributed deployment
+
+When testing Clair in a distributed deployment, Clair uses PostgreSQL for its data persistence. Clair migrations are supported, so users can point Clair to a fresh database to set it up.
+
+In a distributed deployment, users can run Clair in the following modes:
+
+* Indexer mode. When Clair is running in indexer mode, it is responsible for receiving manifests and generating `IndexReports`. An `IndexReport` is an intermediate representation of a manifest's content and is used to discover vulnerabilities.
+
+* Matcher mode. When Clair is running in matcher mode, it is responsible for receiving `IndexReports` and generating `VulnerabilityReports`. A `VulnerabilityReport` describes the contents of a manifest and any vulnerabilities affecting it.
+
+* Notifier mode. When Clair is running in notifier mode, it is responsible for generating notifications when new vulnerabilities affecting a previously indexed manifest enter the system. The notifier sends notifications through the configured mechanisms.
+
+* Combination mode. When Clair is running in combination mode, the `indexer`, `matcher`, and `notifier` all run in a single OS process.
+
+[NOTE]
+====
+These modes are available when running Clair with a {productname} subscription.
+====
+
+For more information on testing Clair in a distributed deployment, see link:https://quay.github.io/clair/howto/getting_started.html#modes[Getting Started With ClairV4].
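+
+In practice, the mode is selected when each Clair process starts. The following is a minimal, untested sketch of running the indexer and matcher as separate containers; the image tag, published ports, and configuration path are assumptions, while `CLAIR_MODE` and `CLAIR_CONF` are the environment variables Clair reads for its run mode and configuration file:
+
+[source,terminal]
+----
+$ podman run -d --name clair-indexer \
+    -p 8080:8080 \
+    -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=indexer \
+    -v /etc/opt/clair:/clair:Z \
+    quay.io/projectquay/clair:4.2.3
+
+$ podman run -d --name clair-matcher \
+    -p 8081:8080 \
+    -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=matcher \
+    -v /etc/opt/clair:/clair:Z \
+    quay.io/projectquay/clair:4.2.3
+----
+
+Both containers can share the same `config.yaml`, because each process only acts on the section relevant to its mode; pointing every process at the same PostgreSQL database is what ties the distributed deployment together.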
diff --git a/modules/testing-features.adoc b/modules/testing-features.adoc
index ff577f945..5ab77199d 100644
--- a/modules/testing-features.adoc
+++ b/modules/testing-features.adoc
@@ -1,9 +1,9 @@
 [[testing-features]]
-= Testing Red Hat Quay 3.7 Features
+= Testing {productname} 3.7 Features
 
 The following features can be tested in one of two ways:
 
- * Using the Red Hat Quay Operator in the Community Catalog of your OpenShift Container Platform cluster.
+ * Using the {productname} Operator in the Community Catalog of your OpenShift Container Platform cluster.
 * Using the following standalone images:
 +
 [source,yaml]
diff --git a/modules/troubleshooting-forgotten-passwords.adoc b/modules/troubleshooting-forgotten-passwords.adoc
new file mode 100644
index 000000000..3c13838fc
--- /dev/null
+++ b/modules/troubleshooting-forgotten-passwords.adoc
@@ -0,0 +1,110 @@
+:_content-type: CONCEPT
+[id="troubleshooting-forgotten-passwords"]
+= Troubleshooting forgotten superuser passwords on {productname}
+
+Use the following procedures to reset superuser passwords on {productname}.
+
+[id="resetting-superuser-password-on-standalone"]
+== Resetting superuser passwords on {productname} standalone deployments
+
+Use the following procedure to reset a superuser's password.
+
+.Prerequisites
+
+* You have created a {productname} superuser.
+* You have installed Python 3.9.
+* You have installed the `pip` package manager for Python.
+* You have installed the `bcrypt` package for `pip`.
+
+.Procedure
+
+. Generate a secure, hashed password using the `bcrypt` package in Python 3.9 by entering the following command:
++
+[source,terminal]
+----
+$ python3.9 -c 'import bcrypt; print(bcrypt.hashpw(b"newpass1234", bcrypt.gensalt(12)).decode("utf-8"))'
+----
++
+.Example output
++
+[source,terminal]
+----
+$2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm
+----
+
+. Enter the following command to show the container ID of your {productname} container registry:
++
+[source,terminal]
+----
+$ sudo podman ps -a
+----
++
+.Example output
++
+[source,terminal]
+----
+CONTAINER ID  IMAGE                                      COMMAND         CREATED         STATUS             PORTS                                        NAMES
+70560beda7aa  registry.redhat.io/rhel8/redis-5:1         run-redis       2 hours ago     Up 2 hours ago     0.0.0.0:6379->6379/tcp                       redis
+8012f4491d10  registry.redhat.io/quay/quay-rhel8:v3.8.2  registry        3 minutes ago   Up 8 seconds ago   0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp  quay
+8b35b493ac05  registry.redhat.io/rhel8/postgresql-10:1   run-postgresql  39 seconds ago  Up 39 seconds ago  0.0.0.0:5432->5432/tcp                       postgresql-quay
+----
+
+. Execute an interactive shell for the `postgresql` container image by entering the following command:
++
+[source,terminal]
+----
+$ sudo podman exec -it 8b35b493ac05 /bin/bash
+----
+
+. Enter the `quay` PostgreSQL database server, specifying the database, username, and host address:
++
+[source,terminal]
+----
+bash-4.4$ psql -d quay -U quayuser -h 192.168.1.28 -W
+----
+
+. Update the `password_hash` of the superuser who lost their password:
++
+[source,terminal]
+----
+quay=> UPDATE public.user SET password_hash = '$2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm' where username = 'quayadmin';
+----
++
+.Example output
++
+[source,terminal]
+----
+UPDATE 1
+----
+
+. Enter the following command to ensure that the `password_hash` has been updated:
++
+[source,terminal]
+----
+quay=> select * from public.user;
+----
++
+.Example output
++
+[source,terminal]
+----
+id |                 uuid                 | username  |                        password_hash                         |         email         | verified | stripe_id | organization | robot | invoice_email | invalid_login_attempts | last_invalid_login | removed_tag_expiration_s | enabled | invoice_email_address | company | family_name | given_name | location | maximum_queued_builds_count | creation_date | last_accessed
+----+--------------------------------------+-----------+--------------------------------------------------------------+-----------------------+----------+-----------+--------------+-------+---------------+------------------------+----------------------------+--------------------------+---------+-----------------------+---------+-------------+------------+----------+-----------------------------+----------------------------+---------------
+1 | 73f04ef6-19ba-41d3-b14d-f2f1eed94a4a | quayadmin | $2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm | quayadmin@example.com | t | | f | f | f | 0 | 2023-02-23 07:54:39.116485 | 1209600 | t | | | | | | | 2023-02-23 07:54:39.116492
+----
+
+. Log in to your {productname} deployment using the new password:
++
+[source,terminal]
+----
+$ sudo podman login -u quayadmin -p newpass1234 http://quay-server.example.com --tls-verify=false
+----
++
+.Example output
++
+[source,terminal]
+----
+Login Succeeded!
+----
diff --git a/modules/unmanaging-clair-database.adoc b/modules/unmanaging-clair-database.adoc
new file mode 100644
index 000000000..99929ee33
--- /dev/null
+++ b/modules/unmanaging-clair-database.adoc
@@ -0,0 +1,32 @@
+// Module included in the following assemblies:
+//
+// clair/master.adoc
+
+:_content-type: PROCEDURE
+[id="unmanaging-clair-database"]
+= Running a custom Clair configuration with an unmanaged Clair database
+
+Use the following procedure to set your Clair database to unmanaged.
+
+.Procedure
+
+* In the Quay Operator, set the `clairpostgres` component of the `QuayRegistry` custom resource to `managed: false`:
++
+[source,yaml]
+----
+apiVersion: quay.redhat.com/v1
+kind: QuayRegistry
+metadata:
+  name: quay370
+spec:
+  configBundleSecret: config-bundle-secret
+  components:
+    - kind: objectstorage
+      managed: false
+    - kind: route
+      managed: true
+    - kind: tls
+      managed: false
+    - kind: clairpostgres
+      managed: false
+----
\ No newline at end of file
diff --git a/modules/using-the-api-to-create-an-organization.adoc b/modules/using-the-api-to-create-an-organization.adoc
new file mode 100644
index 000000000..443bbb452
--- /dev/null
+++ b/modules/using-the-api-to-create-an-organization.adoc
@@ -0,0 +1,75 @@
+:_content-type: PROCEDURE
+[id="using-the-api-to-create-an-organization"]
+== Using the API to create an organization
+
+The following procedure details how to use the API to create a {productname} organization.
+
+.Prerequisites
+
+* You have invoked the `/api/v1/user/initialize` API, and passed in the username, password, and email address.
+* You can call the rest of the {productname} API by specifying the returned OAuth code.
+
+.Procedure
+
+. To create an organization, use a POST call to the `api/v1/organization/` endpoint:
++
+[source,terminal]
+----
+$ curl -X POST -k --header 'Content-Type: application/json' -H "Authorization: Bearer 6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED" https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/organization/ --data '{"name": "testorg", "email": "testorg@example.com"}'
+----
++
+Example output:
++
+[source,terminal]
+----
+"Created"
+----
+
+. You can retrieve the details of the organization you created by entering the following command:
++
+[source,terminal]
+----
+$ curl -X GET -k --header 'Content-Type: application/json' -H "Authorization: Bearer 6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED" https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/organization/testorg
+----
++
+Example output:
++
+[source,terminal]
+----
+{
+    "name": "testorg",
+    "email": "testorg@example.com",
+    "avatar": {
+        "name": "testorg",
+        "hash": "5f113632ad532fc78215c9258a4fb60606d1fa386c91b141116a1317bf9c53c8",
+        "color": "#a55194",
+        "kind": "user"
+    },
+    "is_admin": true,
+    "is_member": true,
+    "teams": {
+        "owners": {
+            "name": "owners",
+            "description": "",
+            "role": "admin",
+            "avatar": {
+                "name": "owners",
+                "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90",
+                "color": "#c7c7c7",
+                "kind": "team"
+            },
+            "can_view": true,
+            "repo_count": 0,
+            "member_count": 1,
+            "is_synced": false
+        }
+    },
+    "ordered_teams": [
+        "owners"
+    ],
+    "invoice_email": false,
+    "invoice_email_address": null,
+    "tag_expiration_s": 1209600,
+    "is_free_account": true
+}
+----
diff --git a/modules/using-the-oauth-token.adoc b/modules/using-the-oauth-token.adoc
new file mode 100644
index 000000000..5d4bfda1f
--- /dev/null
+++ b/modules/using-the-oauth-token.adoc
@@ -0,0 +1,45 @@
+:_content-type: PROCEDURE
+[id="using-the-oauth-token"]
+== Using the OAuth token
+
+After invoking the API, you can call the rest of the {productname} API by specifying the returned OAuth code.
+
+.Prerequisites
+
+* You have invoked the `/api/v1/user/initialize` API, and passed in the username, password, and email address.
+
+.Procedure
+
+* Obtain the list of current users by entering the following command:
++
+[source,terminal]
+----
+$ curl -X GET -k -H "Authorization: Bearer 6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED" https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/superuser/users/
+----
++
+Example output:
++
+[source,terminal]
+----
+{
+    "users": [
+        {
+            "kind": "user",
+            "name": "quayadmin",
+            "username": "quayadmin",
+            "email": "quayadmin@example.com",
+            "verified": true,
+            "avatar": {
+                "name": "quayadmin",
+                "hash": "3e82e9cbf62d25dec0ed1b4c66ca7c5d47ab9f1f271958298dea856fb26adc4c",
+                "color": "#e7ba52",
+                "kind": "user"
+            },
+            "super_user": true,
+            "enabled": true
+        }
+    ]
+}
+----
++
+In this instance, the details for the `quayadmin` user are returned as it is the only user that has been created so far.
\ No newline at end of file
diff --git a/release_notes/master.adoc b/release_notes/master.adoc
index c613b1d62..692306ee7 100644
--- a/release_notes/master.adoc
+++ b/release_notes/master.adoc
@@ -1,50 +1,38 @@
 include::modules/attributes.adoc[]
-[id='quay-release-notes']
+[id="quay-release-notes"]
 = {productname} Release Notes
 
-{productname} is regularly released, containing new features, bug fixes, and software updates.
-We highly recommend deploying the latest version of {productname}.
+The {productname} container registry platform provides secure storage, distribution, and governance of containers and cloud-native artifacts on any infrastructure. It is available as a standalone component or as an Operator on {ocp}. {productname} includes the following features and benefits:
-ifdef::downstream[]
-For {productname} documentation, you should know that:
+* Granular security management
+* Fast and robust at any scale
+* High velocity CI/CD
+* Automated installation and updates
+* Enterprise authentication and team-based access control
+* {ocp} integration
-* Documentation is versioned along with each major release
-* The latest {productname} documentation is available from the link:https://access.redhat.com/documentation/en-us/red_hat_quay[Red Hat Quay Documentation] page
-* Prior to version 2.9.2, the product was referred to as Quay Enterprise
-* Documentation versions prior to 2.9.2 are archived on the link:https://coreos.com/quay-enterprise/docs/latest/[CoreOS] site
-endif::downstream[]
+{productname} is regularly released, containing new features, bug fixes, and software updates. To upgrade {productname} for both standalone and {ocp} deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/upgrade_red_hat_quay/index[Upgrade {productname}].
-{productname}, version 3 is the latest major version.
+[IMPORTANT]
+====
+{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, 3.7.2 -> 3.7.1. Rolling back to previous y-stream versions (3.7.0 -> 3.6.0) is not supported. This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. Database schema upgrades are not considered backwards compatible.
-include::modules/rn_3_70.adoc[leveloffset=+1]
-include::modules/rn_3_60.adoc[leveloffset=+1]
-include::modules/rn_3_50.adoc[leveloffset=+1]
-include::modules/rn_3_40.adoc[leveloffset=+1]
-include::modules/rn_3_30.adoc[leveloffset=+1]
-include::modules/rn_3_20.adoc[leveloffset=+1]
-include::modules/rn_3_10.adoc[leveloffset=+1]
+Downgrading to previous z-streams is neither recommended nor supported for either Operator-based deployments or virtual machine-based deployments. Downgrading should only be done in extreme circumstances. The decision to roll back your {productname} deployment must be made in conjunction with the {productname} support and development teams. For more information, contact {productname} support.
+====
 ifdef::downstream[]
-include::modules/rn_3_00.adoc[leveloffset=+1]
-include::modules/rn_2_90.adoc[leveloffset=+1]
-include::modules/rn_2_80.adoc[leveloffset=+1]
-include::modules/rn_2_70.adoc[leveloffset=+1]
-include::modules/rn_2_60.adoc[leveloffset=+1]
-include::modules/rn_2_50.adoc[leveloffset=+1]
-include::modules/rn_2_40.adoc[leveloffset=+1]
-include::modules/rn_2_30.adoc[leveloffset=+1]
-include::modules/rn_2_20.adoc[leveloffset=+1]
-include::modules/rn_2_10.adoc[leveloffset=+1]
-include::modules/rn_2_00.adoc[leveloffset=+1]
-include::modules/rn_1_18.adoc[leveloffset=+1]
-include::modules/rn_1_17.adoc[leveloffset=+1]
-include::modules/rn_1_16.adoc[leveloffset=+1]
-include::modules/rn_1_15.adoc[leveloffset=+1]
-include::modules/rn_1_14.adoc[leveloffset=+1]
-include::modules/rn_1_13.adoc[leveloffset=+1]
-include::modules/rn_1_12.adoc[leveloffset=+1]
+
+Documentation for {productname} is versioned with each release. The latest {productname} documentation is available from the link:https://access.redhat.com/documentation/en-us/red_hat_quay[{productname} Documentation] page. Currently, version 3 is the latest major version.
+
+[NOTE]
+====
+Prior to version 2.9.2, {productname} was called Quay Enterprise. Documentation for 2.9.2 and prior versions is archived on the link:https://access.redhat.com/documentation/en-us/red_hat_quay/2.9[Product Documentation for Red Hat Quay 2.9] page.
+====
+
 endif::downstream[]
 
+include::modules/rn_3_90.adoc[leveloffset=+1]
+
 [discrete]
diff --git a/troubleshooting_quay/docinfo.xml b/troubleshooting_quay/docinfo.xml
new file mode 100644
index 000000000..09aac9ba9
--- /dev/null
+++ b/troubleshooting_quay/docinfo.xml
@@ -0,0 +1,10 @@
+<productname>{productname}</productname>
+<productnumber>{producty}</productnumber>
+<subtitle>Troubleshooting {productname}</subtitle>
+<abstract>
+    <para>Troubleshooting {productname}</para>
+</abstract>
+<authorgroup>
+    <orgname>Red Hat OpenShift Documentation Team</orgname>
+</authorgroup>
+
diff --git a/troubleshooting_quay/master.adoc b/troubleshooting_quay/master.adoc
new file mode 100644
index 000000000..ef759f38e
--- /dev/null
+++ b/troubleshooting_quay/master.adoc
@@ -0,0 +1,25 @@
+:_content-type: ASSEMBLY
+include::modules/attributes.adoc[]
+
+[id="troubleshooting-quay"]
+= Troubleshooting {productname}
+
+Use the content in this guide to troubleshoot your {productname} registry on both standalone and Operator-based deployments.
+
+[id="troubleshooting-quay-standalone"]
+== Troubleshooting {productname} standalone deployments
+
+Topics covered here include:
+
+* Resetting a forgotten superuser password on {productname}
+
+include::modules/troubleshooting-forgotten-passwords.adoc[leveloffset=+1]
+
+[id="troubleshooting-quay-operator"]
+== Troubleshooting {productname} Operator deployments
+
+Topics covered here include:
+
+* Resetting a forgotten superuser password on {productname}
+
+include::modules/resetting-superuser-password-on-operator.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/troubleshooting_quay/modules b/troubleshooting_quay/modules
new file mode 120000
index 000000000..43aab75b5
--- /dev/null
+++ b/troubleshooting_quay/modules
@@ -0,0 +1 @@
+../modules/
\ No newline at end of file
diff --git a/upgrade_quay/master.adoc b/upgrade_quay/master.adoc
index 3fe885951..13313e021 100644
--- a/upgrade_quay/master.adoc
+++ b/upgrade_quay/master.adoc
@@ -18,3 +18,5 @@ include::modules/operator-upgrade.adoc[leveloffset=+1]
 include::modules/proc_upgrade_standalone.adoc[leveloffset=+1]
 
 include::modules/qbo-operator-upgrade.adoc[leveloffset=+1]
+
+include::modules/downgrade-quay-deployment.adoc[leveloffset=+1]
diff --git a/use_quay/master.adoc b/use_quay/master.adoc
index 80d3677be..2c8d36b6f 100644
--- a/use_quay/master.adoc
+++ b/use_quay/master.adoc
@@ -63,6 +63,7 @@ include::modules/quay-as-cache-proxy.adoc[leveloffset=+1]
 include::modules/proxy-cache-arch.adoc[leveloffset=+2]
 include::modules/proxy-cache-limitations.adoc[leveloffset=+2]
 include::modules/proxy-cache-procedure.adoc[leveloffset=+2]
+include::modules/proxy-cache-leveraging-storage-quota-limits.adoc[leveloffset=+2]
 
 // Virtual builders
 include::modules/build-enhancements.adoc[leveloffset=+1]
diff --git a/welcome.adoc b/welcome.adoc
index 6f709aff2..f31ddffe8 100644
--- a/welcome.adoc
+++ b/welcome.adoc
@@ -18,18 +18,23 @@ Quay can be deployed in a variety of configurations, both within and outside of
 
 xref:deploy_quay_on_openshift_op_tng.adoc[Deploy with Openshift Operator]
 
-xref:deploy_quay.adoc[Basic Deploy]
+xref:deploy_quay.adoc[Deploy Proof of Concept]
 
 xref:deploy_quay_ha.adoc[Deploy High Availability]
+
 == Managing Quay
 
+xref:config_quay.adoc[Configure {productname}]
+
 xref:manage_quay.adoc[Manage {productname}]
 
+xref:upgrade_quay.adoc[Upgrade {productname}]
+
 == Using Quay
 
 xref:use_quay.adoc[Use {productname}]
 
+xref:api_quay.adoc[{productname} API Guide]
 
 NOTE: Help make {productname} docs better on https://github.com/quay/quay-docs[github]