diff --git a/.github/workflows/cherry-pick.yml b/.github/workflows/cherry-pick.yml new file mode 100644 index 000000000..0137d84b5 --- /dev/null +++ b/.github/workflows/cherry-pick.yml @@ -0,0 +1,36 @@ +name: Cherry Pick + +on: + issue_comment: + types: [created] + +jobs: + cherry-pick: + runs-on: ubuntu-latest + if: | + startsWith(github.event.comment.body, '/cherry-pick') + steps: + - name: Check out repository + uses: actions/checkout@v2 + + - name: Set up Git + run: | + git config --global user.name "${{ github.actor }}" + git config --global user.email "${{ github.actor }}@users.noreply.github.com" + - name: Extract target branch + id: extract + run: echo "::set-output name=branch::$(echo "${{ github.event.comment.body }}" | cut -d' ' -f2)" + + - name: Cherry-pick the PR + env: + GH_PAT: ${{ secrets.GH_PAT }} + run: | + TARGET_BRANCH=${{ steps.extract.outputs.branch }} + git fetch origin ${{ github.event.pull_request.head.ref }} + git checkout $TARGET_BRANCH + git cherry-pick ${{ github.event.pull_request.head.sha }} || exit 0 + - name: Push changes + env: + GH_PAT: ${{ secrets.GH_PAT }} + run: | + git push https://${GH_PAT}@github.com/${{ github.repository }} $TARGET_BRANCH diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..6378f560a --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,24 @@ +name: ci +on: + push: + branches: + - master + - main +permissions: + contents: write +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.x + - uses: actions/cache@v4 + with: + key: ${{ github.ref }} + path: .cache + - run: pip install mkdocs-material + - run: pip install mkdocs-swagger-ui-tag + - run: pip install "mkdocs-material[imaging]" + - run: mkdocs gh-deploy --force diff --git a/.gitleaks.toml b/.gitleaks.toml new file mode 100644 index 000000000..c8ba4d00a --- /dev/null +++ b/.gitleaks.toml @@ -0,0 +1,54 @@ 
+[allowlist] +description = "Allow bcrypt hashes used in SQL updates" + +regexes = [ + # Ignore bcrypt password hashes (e.g., $2b$12$...) + '''\$2b\$12\$[A-Za-z0-9./]{53}''' +] + +paths = [ + # Ignore all example certs + '''\/example.*\.pem$''', + + # Ignore anything with the word funkymonkey anywhere in the path (example values below) + '''ANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI''', + '''E6GJSHOZMFBVNHTHNB53''', + '''MCJ61D8KQBFS2DXM56S2''', + '''J5G7CCX5QCA8Q5XZLWGI7USJPSM4M5MQHJED46CF''', + '''IG58PX2REEY9O08IZFZE''', + '''2LWTWO89KH26P2CO4TWFM7PGCX4V4SUZES2CIZMR''', + '''6XBK7QY7ACSCN5XBM3GS''', + '''AVKBOUXTFO3MXBBK5UJD5QCQRN2FWL3O0XPZZT78''', + '''SANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI''', + '''WB4FUG4PP2278KK579EN4NDP150CPYOG6DN42MP6JF8IAJ4PON4RC7DIOH5UEFBP''', + '''MXFE7NSOWPN33O7UC3THY0BN03DW940CMWTLRBE2EPTI8JPX0B0CWIIDGTI4YTJ6''', + '''IJWZ8TIY301KPFOW3WEUJEVZ3JR11CY1''', + '''9Q36xF54YEOLjetayC0NBaIKgcFFmIHsS3xTZDLzZSrhTBkxUc9FDwUKfnxLWhco6oBJV1NDBjoBcDGmsZMYPt1dSA4yWpPe/JKY9pnDcsw=''', + '''MXZ9DATUWRD8WCMT8AZIPYE0IEZHJJ1B8P8ZEIXC0W552DUMMTNJJH02HFGXTOVG''', + '''CWLBVAODE61IXNDJ40GERFOZPB3ARZDRCP4X70ID1NB28AI0OOJBTR9S4M0ACYMD''', + '''BILZ6YTVAZAKOGMD9270OKN3SOD9KPB7OLKEJQOJE38NBBRUJTIH7T5859DJL31Q''', + '''QBFYWIWZOS1I0P0R9N1JRNP1UZAOPUIR3EB4ASPZKK9IA1SFC12LTEF7OJHB05Z8''', + '''E6GJSHOZMFBVNHTHNB53''', + '''postgresql://:test123@172.24.10.50/quay''', + '''postgresql://:test123@172.24.10.50/example-restore-registry-quay-database''', + '''quayadmin''', + '''DB_URI: postgresql://restore-registry-quay-database:zLTm315muk6rz7mL4aFuLQ2Q8rAk-dB4kPHQ2WMvdyqhaZywf20503wCZfv2Ml1f15LUsDN2-0m71gnI@restore-registry-quay-database:5432/restore-registry-quay-database +''', + '''DB_URI: postgresql://quay360-quay-database:0vrsIUYdhCnF8r-jwz7zR6gck6kcLLQhJ11u0dx1lz8YBk185P5NnqIBwtY22JArYLi3opdKJH2-w4aM@quay360-quay-database:5432/quay360-quay-database +''', + '''XyThQKm6lMWh4O7dKdmRwMUHB9ktxPPVSRIePOY2''', + '''VvoFhVFp8BqcOgQ9LczE''', + '''DB_URI: 
postgresql://restore-registry-quay-database:zLTm315muk6rz7mL4aFuLQ2Q8rAk-dB4kPHQ2WMvdyqhaZywf20503wCZfv2Ml1f15LUsDN2-0m71gnI@restore-registry-quay-database:5432/restore-registry-quay-database +''', + '''postgresql://quayuser:quaypass@quay-server:5432/quay''', + '''4b1c5663-88c6-47ac-b4a8-bb594660f08b''', + '''postgresql://example-registry-quay-database:OyC4zGhJMbi3yUzW1aIgOLQNW18r14nAcuJfbsjtrAXUVInj2JgwLskQPOutPCXMtlKr1UPTsIPqOEjV@example-registry-quay-database:5432/example-registry-quay-database''', + '''postgresql://restore-registry-quay-database:zLTm315muk6rz7mL4aFuLQ2Q8rAk-dB4kPHQ2WMvdyqhaZywf20503wCZfv2Ml1f15LUsDN2-0m71gnI@restore-registry-quay-database:5432/restore-registry-quay-database''', + '''postgresql://example-restore-registry-quay-database:onHl1LDsspZh4hoOL5wW1Of7GV0Kmtp2@example-restore-registry-quay-database:5432/example-restore-registry-quay-database''', + '''postgresql://example-restore-registry-quay-database:onHl1LDsspZh4hoOL5wW1Of7GV0Kmtp2@example-restore-registry-quay-database:5432/example-restore-registry-quay-database''', + '''zsk/j4zEOkQq+W0BQJdSufP+IackV8WICXB5zvdF''', + '''1H36Izzc90cUNVHaiaUX''', + '''iO1b3RUt4KKgjSimCROSPN3cEMn4TqSgsPyniMBR''', + '''EH67NB3Y6PTBED8H0HC6UVHGGGA3ODSE''', + '''fn37AZAUQH0PTsU+vlO9lS0QxPW9A/boXL4ovZjIFtlUPrBz9i4j9UDOqMjuxQ/0HTfy38goKEpG8zYXVeQh3lOFzuOjSvKic2Vq7xdtQsU=''', +] diff --git a/.vale.ini b/.vale.ini new file mode 100644 index 000000000..0d97147d2 --- /dev/null +++ b/.vale.ini @@ -0,0 +1,8 @@ +StylesPath = styles + +MinAlertLevel = suggestion + +Packages = RedHat + +[*] +BasedOnStyles = RedHat \ No newline at end of file diff --git a/.vale/styles/Vocab/OpenShiftDocs/accept.txt b/.vale/styles/Vocab/OpenShiftDocs/accept.txt new file mode 100644 index 000000000..d79ca1ba7 --- /dev/null +++ b/.vale/styles/Vocab/OpenShiftDocs/accept.txt @@ -0,0 +1,11 @@ +# Regex terms added to accept.txt are ignored by the Vale linter and override RedHat Vale rules. 
+# Add terms that have a corresponding incorrectly capitalized form to reject.txt. + +[Pp]assthrough +Assisted Installer +Control Plane Machine Set Operator +custom resource +custom resources +MetalLB +Operator +Operators \ No newline at end of file diff --git a/.vale/styles/Vocab/OpenShiftDocs/reject.txt b/.vale/styles/Vocab/OpenShiftDocs/reject.txt new file mode 100644 index 000000000..5a9d2b4c2 --- /dev/null +++ b/.vale/styles/Vocab/OpenShiftDocs/reject.txt @@ -0,0 +1,15 @@ +# Regex terms added to reject.txt are highlighted as errors by the Vale linter and override RedHat Vale rules. +# Add terms that have a corresponding correctly capitalized form to accept.txt. + +[Dd]eployment [Cc]onfigs? +[Dd]eployment [Cc]onfigurations? +[Oo]peratorize +[Ss]ingle [Nn]ode OpenShift +[Tt]hree [Nn]ode OpenShift +AI +configuration maps? +MachineSets +machinesets? +minions? +operators? +SNO \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index f93e1dbd0..a20e22283 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -4,5 +4,9 @@ "markdown", "latex", "plaintext" + ], + "cSpell.words": [ + "OIDC", + "productname" ] } \ No newline at end of file diff --git a/README.adoc b/README.adoc index df6631d31..fb6db7746 100644 --- a/README.adoc +++ b/README.adoc @@ -1,33 +1,35 @@ -= Documentation for the {productname} product += Contributing to Red Hat Quay documentation :downstream: == Repository structure -Structure of this repository: +The Red Hat Quay repository is structured as follows: -* Books go into a top-level directory. For example: `repo_dir/manage_quay/`. -* Each book directory has a symlink to the top-level `repo_dir/modules/` directory. -* A book's TOC is defined in the `master.adoc` file contained within the book's directory. -* `master.adoc` contains `include` statements to modules (chapters), which are created in the top-level `modules/` directory. 
-* `docinfo.xml` in the book's directory contains basic information about the book. +* Books go into a top-level directory. For example, `repo_dir/manage_quay` or `repo_dir/release_notes`. +* Each book directory has a symlink to the top-level `repo_dir/modules` directory. +* A book's _table of contents_, or ToC, is defined in the `master.adoc` that is contained within the book's directory. Each directory has its own `master.adoc` file. +* The `master.adoc` file contains `include` statements to _modules_, which act as chapters and subchapters. These are created in the top-level `modules/` directory. +* The `docinfo.xml` in the book's directory contains basic information about the book, such as the product name, the product version, and the organization name. -== How do I set up? +== Setting up your repository for contribution ifdef::downstream[] -. Get the _Developer_, the _Maintainer_, or the _Owner_ permissions for the https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/[downstream repository]. For that, contact a Maintainer or an Owner from this https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/-/project_members[list]. Default to contacting Vikram Goyal. +. For _downstream_ contribution, which is the official Red Hat Quay documentation found on the Red Hat portal, you must obtain _Developer_, _Maintainer_, or _Owner_ permissions for the https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/[downstream repository]. ++ +To obtain the necessary permissions, contact a Maintainer or Owner from the Gitlab project members https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/-/project_members[list]. Default to contacting Steven Smith. endif::downstream[] -. Fork the https://github.com/quay/quay-docs[upstream repository] by clicking "Fork". +. Fork the https://github.com/quay/quay-docs[upstream repository] by clicking the *Fork* button. . 
Clone your fork of the repository to your computer: + ---- -$ git clone git@github.com:/quay-docs.git +$ git clone git@github.com:/quay-docs.git ---- + -Substitute with your GitHub user name. +Substitute `` with your GitHub user name. . Navigate to the cloned repository: + @@ -54,15 +56,23 @@ endif::downstream[] [id="how-do-i-make-a-contribution"] == How do I make a contribution? -. Create a new branch based off the `3.0-master` branch and switch to it: +To contribute to Red Hat Quay documentation, you must create a new feature branch based off of the `master` branch. + +. Checkout the `master` branch if you have not already: + ---- -$ git checkout -b 3.0-master +$ git checkout master ---- + +. Create a new feature branch based off the `master` branch: + -Substitute with a name that reflects the contribution you intend to make. +---- +$ git checkout -b master +---- ++ +Substitute `` with a name that reflects the contribution you intend to make. -. Edit the files and commit them using `git add` and `git commit`. +. Edit the files and commit them using `git add` and `git commit`. Make your commit in present tense, highlighting the change that you have made. . Push your commits to your fork of the upstream repository: + @@ -70,37 +80,41 @@ Substitute with a name that reflects the contribution you intend t $ git push origin ---- -. Create a pull request from `/` to `quay/3.0-master`. For that, either: +. Create a pull request from `/` to `quay/master`. For that, either: + -- -.. visit the link from the output of the previous step (the link is there after the first push only) -.. navigate to https://github.com//quay-docs and use the interface to create the pull request +.. Visit the link from the output of the previous step. The link is there after the first push only. + +.. Navigate to https://github.com//quay-docs. 
Use the interface to create the pull request -- + -As you create the pull request, tag one of the repository collaborators and ask them to review the pull request. +As you create the pull request, tag one of the repository collaborators and ask them to review the pull request. The default contact should be Steven Smith. -. Work together with the reviewer to finish your pull request, then ask them to merge it. +. Work with the reviewer to finish your pull request. After the suggested changes have been made, the reviewer will merge the pull request. -. Shortly after your pull request is merged into the `3.0-master` branch, your updates will become live in the https://docs.projectquay.io[Project Quay documentation]. +. After your pull request is merged into the `master` branch, your updates will become live in the https://docs.projectquay.io[Project Quay documentation]. Eventually, those changes will end up on the portal. == How do I make a contribution to the downstream documentation? -Downstream documentation resides in the `3.0-stage` branch of the https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/[downstream repository]. However, Quay documentation is upstream-first. This means that all changes, including downstream-only content, first go to the `3.0-master` branch of the https://github.com/quay/quay-docs[upstream repository]. After that, they are pushed to the `3.0-master` branch of the downstream repository and get merged to the `3.0-stage` branch. +Like upstream documentation, downstream documentation primarily resides in the `master` branch of the https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/[downstream repository]. For most users, the only necessary step is to create a feature branch from the `master` branch. -In other words, to contribute to the downstream documentation: +To make a contribution to upstream documentation, follow the instructions at <>. 
Be sure to work with the documentation lead for Red Hat Quay to get the content reviewed, merged, and published on the downstream portal. -. Make your contribution in the upstream: <> -. Move it over to the downstream repository: <> +=== How Red Hat Quay downstream documentation is branched -[id="how-do-i-keep-my-local-3.0-master-up-to-date-with-remote-3.0-master"] -== How do I keep my local `3.0-master` up-to-date with remote `3.0-master`? +After you have created and merged a pull request, relevant branches are then reset to match the `master` branch. For example, if the current version of Red Hat Quay is 3.10, then the relevant 3.10 branch (`redhat-3.10`) is reset to match the `master` branch. This ensures that the most recent content changes are up to date in the most recent version branch. -As other people push to the `3.0-master` branch in the https://github.com/quay/quay-docs[upstream repository], you need to keep your local `3.0-master` up-to-date. It is optimal to do it regularly, for example, daily. +After the most recent branch is reset to match the `master` branch, the `3.0-stage` branch is then reset to match the most recent version branch (for example, `3.0-stage` is reset to match `redhat-3.10`). The reason for this is that the Red Hat Quay `3` version is copied directly from the most recent version of Red Hat Quay. -. Switch to `3.0-master`: +[id="how-do-i-keep-my-local-master-up-to-date-with-remote-master"] +== How do I keep my local `master` up-to-date with remote `master`? + +As contributors push and merge pull requests to the `master` branch, you must keep your local `master` branch up to date. Prior to making any changes to the documentation, you should rebase your local `master` branch to match the most recent version of the remote `master` branch. + +. Check out the `master` branch: + ---- -$ git checkout 3.0-master +$ git checkout master ---- . 
Fetch the commits that are in the upstream repository but not in your local repository: @@ -109,17 +123,19 @@ $ git checkout 3.0-master $ git fetch upstream ---- -. Apply the fetched commits to your local `3.0-master`: +. Apply the fetched commits to your local `master`: + ---- -$ git rebase upstream/3.0-master +$ git rebase upstream/master ---- -== How do I keep my feature branch up-to-date with 3.0-master? +Now, your local `master` branch is up to date. -As new commits appear on the `3.0-master` branch, your existing feature branch does not automatically incorporate those commits. To prevent your feature branch and `3.0-master` from diverging, you need to manually update your feature branch to `3.0-master`: +== How do I keep my feature branch up-to-date with the master branch? -. Bring your local `3.0-master` up-to-date with the remote `3.0-master`. <> +As new commits appear on the `master` branch, your existing feature branch does not automatically incorporate those commits. To prevent your feature branch and `master` from diverging, you need to manually update your feature branch to the `master` branch: + +. Bring your local `master` branch up-to-date with the remote `master` branch by following the instructions at <>. . Switch to the feature branch that you want to update: + @@ -127,18 +143,18 @@ As new commits appear on the `3.0-master` branch, your existing feature branch d $ git checkout ---- -. Apply the commits from `3.0-master` to : +. Apply the commits from the `master` branch to your ``: + ---- -$ git rebase upstream/3.0-master +$ git rebase upstream/master ---- + -. Push the updated to your fork of the upstream repository. Since your local has been updated, it might be incompatible with the remote , so you need to use the `--force` option: +. Push the updated `` to your fork of the upstream repository. 
Since your local `` has been updated, it might be incompatible with the remote ``, so you need to use the `--force` option: + [IMPORTANT] ==== -Never use the `--force` argument when pushing to `3.0-master`. +Never use the `--force` argument when pushing to `master`. ==== + ---- @@ -147,6 +163,7 @@ $ git push --force origin ifdef::downstream[] +//// [id="how-do-i-keep-the-downstream-repository-and-branch-up-to-date"] == How do I keep the downstream repository and branch up-to-date? @@ -185,13 +202,12 @@ $ git push downstream ---- endif::downstream[] +//// == How do I make content appear in upstream but not in downstream? -If you need to make a part of content appear only https://docs.projectquay.io[in the upstream documentation]: +You can make content appear only in the upstream by using the `ifdef::upstream` conditional around the content that you only want to appear upstream. For example: -* Surround the content with these lines: -+ ---- \ifdef::upstream[] @@ -201,10 +217,8 @@ If you need to make a part of content appear only https://docs.projectquay.io[in ifdef::downstream[] == How do I make content appear in downstream but not in upstream? -If you need to make a part of content appear only https://access.redhat.com/documentation/en-us/red_hat_quay/3/[in the downstream documentation]: +You can make content appear only in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/[downstream] by using the `ifdef::downstream` conditional around the content that you only want to appear downstream. 
For example: -* Surround the content with these lines: -+ ---- \ifdef::downstream[] diff --git a/access_permissions_management/docinfo.xml b/access_permissions_management/docinfo.xml new file mode 100644 index 000000000..45f4c60ad --- /dev/null +++ b/access_permissions_management/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Managing access and permissions + + Managing access and permissions: Roles, Robots, and Repository Security + + + Red Hat OpenShift Documentation Team + + diff --git a/access_permissions_management/master.adoc b/access_permissions_management/master.adoc new file mode 100644 index 000000000..e74b99f15 --- /dev/null +++ b/access_permissions_management/master.adoc @@ -0,0 +1,50 @@ +include::modules/attributes.adoc[] + +:_content-type: ASSEMBLY +[id="access-permissions-management-quay"] += Managing access and permissions +:context: quay-security + +{productname} offers a comprehensive permissions model, which allows administrators the ability to control who can access, manage, and modify repositories at a granular level. The following sections show you how to manage user access, define team roles, set permissions for users and robot accounts, and define the visibility of a repository. These guides include instructions using both the {productname} UI and the API. 
+ +The following topics are covered: + +* Role-based access controls +* Adjusting repository visibility +* Creating and managing robot accounts +* Clair vulnerability reporting + +//rbac + +include::modules/role-based-access-control-intro.adoc[leveloffset=+1] +include::modules/teams-overview.adoc[leveloffset=+2] +include::modules/set-team-role.adoc[leveloffset=+3] +include::modules/managing-team-members-repo-permissions-ui.adoc[leveloffset=+3] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+3] +include::modules/default-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/default-permissions-api.adoc[leveloffset=+2] +include::modules/allow-access-user-repo.adoc[leveloffset=+2] +include::modules/adjust-access-user-repo-api.adoc[leveloffset=+2] + +//Registry restriction +include::modules/registry-wide-access-management.adoc[leveloffset=+1] +include::modules/managing-restricted-users.adoc[leveloffset=+2] +include::modules/managing-superuser-full-access.adoc[leveloffset=+2] + +//private repo +include::modules/proc_use-quay-create-repo.adoc[leveloffset=+1] +include::modules/adjusting-repository-visibility-via-the-ui.adoc[leveloffset=+2] +include::modules/adjusting-repository-access-via-the-api.adoc[leveloffset=+2] + +//robot accounts +include::modules/robot-account-overview.adoc[leveloffset=+1] +include::modules/creating-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/creating-robot-account-api.adoc[leveloffset=+2] +include::modules/managing-robot-account-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/disabling-robot-account.adoc[leveloffset=+2] +include::modules/regenerating-robot-account-token-api.adoc[leveloffset=+2] +include::modules/deleting-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/deleting-robot-account-api.adoc[leveloffset=+2] + +//clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] diff --git a/access_permissions_management/modules 
b/access_permissions_management/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/access_permissions_management/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/api-v2-public.yaml b/api-v2-public.yaml new file mode 100644 index 000000000..db3527a6c --- /dev/null +++ b/api-v2-public.yaml @@ -0,0 +1,521 @@ +openapi: 3.0.3 +info: + title: Quay API + version: 1.0.0 +servers: + - url: "{protocol}://{host}" + description: "Set your own Quay registry URL" + variables: + protocol: + default: "https" + enum: + - "http" + - "https" + host: + default: "quay-server.example.com" + description: "Enter your Quay registry hostname" + +security: + - BearerAuth: [] + +tags: + - name: "Application specific tokens" + description: "Manage application-specific tokens by using the API" + - name: Builds + description: API endpoints for managing Quay repository builds + - name: Discovery + description: API discovery information + - name: Error + description: Obtain error details by using the API +paths: + /api/v1/user/apptoken: + get: + tags: + - "Application specific tokens" + summary: List app-specific tokens + description: Retrieves a list of application-specific tokens for the user. 
+ operationId: listAppTokens + security: + - BearerAuth: [] + parameters: + - name: expiring + in: query + required: false + schema: + type: boolean + description: "If true, only returns those tokens expiring soon" + responses: + '200': + description: Successful invocation + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + post: + tags: + - "Application specific tokens" + summary: Create a new app-specific token + description: Creates a new application-specific token for the user. + operationId: createAppToken + security: + - BearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + title: + type: string + example: "MyAppToken" + responses: + '201': + description: Successful creation + content: + application/json: + schema: + type: object + properties: + token: + type: string + example: "abc123xyz" + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + + /api/v1/user/apptoken/{token_uuid}: + get: + tags: + - "Application specific tokens" + summary: Get details of a specific app token + description: Retrieves details for a specific application token. 
+ operationId: getAppToken + security: + - BearerAuth: [] + parameters: + - name: token_uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Successful invocation + content: + application/json: + schema: + type: object + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + delete: + tags: + - "Application specific tokens" + summary: Revoke a specific app token + description: Revokes a specific application token for the user. + operationId: revokeAppToken + security: + - BearerAuth: [] + parameters: + - name: token_uuid + in: path + required: true + schema: + type: string + responses: + '204': + description: Deleted + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' +#build + /api/v1/repository/{repository}/build/{build_uuid}/status: + get: + tags: + - "Builds" + summary: Return the status for the builds specified by the build UUID + parameters: + - name: repository + in: path + required: true + schema: + type: string + description: The full path of the repository (e.g., namespace/name) + - name: build_uuid + in: path + required: true + schema: + type: string + description: The UUID of 
the build + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + /api/v1/repository/{repository}/build/{build_uuid}/logs: + get: + tags: + - "Builds" + summary: Return the build logs for the specified build UUID + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: build_uuid + in: path + required: true + schema: + type: string + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + /api/v1/repository/{repository}/build/{build_uuid}: + get: + tags: + - "Builds" + summary: Returns information about a build + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: build_uuid + in: path + required: true + schema: + type: string + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + delete: + tags: + - "Builds" + summary: Cancel a repository build + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: build_uuid + in: path + required: true + schema: + type: string + responses: + "204": + description: Deleted + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + /api/v1/repository/{repository}/build/: + post: + tags: + - "Builds" + summary: Request a repository build and push + parameters: + - name: repository + in: path + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + type: object + properties: + file_id: + type: 
string + archive_url: + type: string + subdirectory: + type: string + dockerfile_path: + type: string + context: + type: string + pull_robot: + type: string + tags: + type: array + items: + type: string + responses: + "201": + description: Successful creation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + get: + tags: + - "Builds" + summary: Get the list of repository builds + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: since + in: query + schema: + type: integer + description: Returns all builds since the given Unix timestamp + - name: limit + in: query + schema: + type: integer + description: The maximum number of builds to return + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + /api/v1/discovery: + get: + tags: + - "Discovery" + summary: List all available API endpoints + description: Returns a list of all API endpoints available in the Swagger API format. + operationId: getDiscovery + parameters: + - name: internal + in: query + description: Whether to include internal APIs. 
+ required: false + schema: + type: boolean + responses: + '200': + description: Successful invocation + content: + application/json: + schema: + type: array + items: + type: string + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + + /api/v1/error/{error_type}: + get: + tags: + - "Error" + summary: Get a detailed description of the error + description: Retrieves a detailed description of the specified error type. + operationId: getErrorDescription + parameters: + - name: error_type + in: path + description: The error code identifying the type of error. + required: true + schema: + type: string + responses: + '200': + description: Successful invocation + content: + application/json: + schema: + $ref: '#/components/schemas/ApiErrorDescription' + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + +components: + securitySchemes: + BearerAuth: + type: http + scheme: bearer + schemas: + ApiError: + type: object + properties: + status: + type: integer + description: HTTP status code of the error. + detail: + type: string + description: A short message describing the error. 
+ ApiErrorDescription: + type: object + properties: + error: + type: string + description: The error code. + message: + type: string + description: A detailed description of the error. \ No newline at end of file diff --git a/api/master.adoc b/api/master.adoc index 1761c7c84..271e62b72 100644 --- a/api/master.adoc +++ b/api/master.adoc @@ -1,246 +1,134 @@ -:_content-type: CONCEPT - - +:_content-type: ASSEMBLY include::modules/attributes.adoc[] - [id="api"] - -= {productname} API Guide - -The {productname} application programming interface (API) is an OAuth 2 RESTful API that consists of a set of endpoints for adding, displaying, changing and deleting features for {productname}. - -{productname} abides by the link:https://semver.org/#summary[Semantic Versioning (SemVer) specifications]. The following conditions are met with each major, minor, and patch release: - -* Major versions of {productname} might include incompatible API changes. For example, the API of {productname} 2.0 differs from {productname} 3.0. -* Minor versions of {productname}, for example, 3.y, adds functionality in a backwards compatible manner. -* Patch versions of {productname}, for example, 3.y.z, introduces backwards compatible bug fixes. - -Currently, {productname} uses the `api/v1` endpoint for 3.y.z releases. - -This guide describes the `api/v1` endpoints and the browser-based examples for accessing those endpoints. - -include::modules/proc_use-api.adoc[leveloffset=+1] - - - -== {productname} Application Programming Interface (API) -[id="ref-api-quay"] - -This API allows you to perform many of the operations required to work with {productname} repositories, users, and organizations. 
- -include::modules/api-authorization.adoc[leveloffset=+2] - - -include::modules/api-appspecifictokens.adoc[leveloffset=+2] -include::modules/api-appspecifictokens-createAppToken.adoc[leveloffset=+3] -include::modules/api-appspecifictokens-listAppTokens.adoc[leveloffset=+3] -include::modules/api-appspecifictokens-getAppToken.adoc[leveloffset=+3] -include::modules/api-appspecifictokens-revokeAppToken.adoc[leveloffset=+3] - -include::modules/api-build.adoc[leveloffset=+2] -include::modules/api-build-getRepoBuildStatus.adoc[leveloffset=+3] -include::modules/api-build-getRepoBuildLogs.adoc[leveloffset=+3] -include::modules/api-build-getRepoBuild.adoc[leveloffset=+3] -include::modules/api-build-cancelRepoBuild.adoc[leveloffset=+3] -include::modules/api-build-requestRepoBuild.adoc[leveloffset=+3] -include::modules/api-build-getRepoBuilds.adoc[leveloffset=+3] - -include::modules/api-discovery.adoc[leveloffset=+2] -include::modules/api-discovery-discovery.adoc[leveloffset=+3] - -include::modules/api-error.adoc[leveloffset=+2] -include::modules/api-error-getErrorDescription.adoc[leveloffset=+3] - -include::modules/api-globalmessages.adoc[leveloffset=+2] -include::modules/api-globalmessages-createGlobalMessage.adoc[leveloffset=+3] -include::modules/api-globalmessages-getGlobalMessages.adoc[leveloffset=+3] -include::modules/api-globalmessages-deleteGlobalMessage.adoc[leveloffset=+3] - -include::modules/api-logs.adoc[leveloffset=+2] -include::modules/api-logs-getAggregateUserLogs.adoc[leveloffset=+3] -include::modules/api-logs-exportUserLogs.adoc[leveloffset=+3] -include::modules/api-logs-listUserLogs.adoc[leveloffset=+3] -include::modules/api-logs-getAggregateOrgLogs.adoc[leveloffset=+3] -include::modules/api-logs-exportOrgLogs.adoc[leveloffset=+3] -include::modules/api-logs-listOrgLogs.adoc[leveloffset=+3] -include::modules/api-logs-getAggregateRepoLogs.adoc[leveloffset=+3] -include::modules/api-logs-exportRepoLogs.adoc[leveloffset=+3] 
-include::modules/api-logs-listRepoLogs.adoc[leveloffset=+3] - -include::modules/api-manifest.adoc[leveloffset=+2] -include::modules/api-manifest-getManifestLabel.adoc[leveloffset=+3] -include::modules/api-manifest-deleteManifestLabel.adoc[leveloffset=+3] -include::modules/api-manifest-addManifestLabel.adoc[leveloffset=+3] -include::modules/api-manifest-listManifestLabels.adoc[leveloffset=+3] -include::modules/api-manifest-getRepoManifest.adoc[leveloffset=+3] - -include::modules/api-mirror.adoc[leveloffset=+2] -include::modules/api-mirror-syncCancel.adoc[leveloffset=+3] -include::modules/api-mirror-syncNow.adoc[leveloffset=+3] -include::modules/api-mirror-getRepoMirrorConfig.adoc[leveloffset=+3] -include::modules/api-mirror-changeRepoMirrorConfig.adoc[leveloffset=+3] -include::modules/api-mirror-createRepoMirrorConfig.adoc[leveloffset=+3] - -include::modules/api-namespacequota.adoc[leveloffset=+2] -include::modules/api-namespacequota-listUserQuota.adoc[leveloffset=+3] -include::modules/api-namespacequota-getOrganizationQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-changeOrganizationQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-createOrganizationQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-listOrganizationQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-getUserQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-listUserQuotaLimit.adoc[leveloffset=+3] -include::modules/api-namespacequota-getOrganizationQuota.adoc[leveloffset=+3] -include::modules/api-namespacequota-changeOrganizationQuota.adoc[leveloffset=+3] -include::modules/api-namespacequota-deleteOrganizationQuota.adoc[leveloffset=+3] -include::modules/api-namespacequota-createOrganizationQuota.adoc[leveloffset=+3] -include::modules/api-namespacequota-listOrganizationQuota.adoc[leveloffset=+3] 
-include::modules/api-namespacequota-getUserQuota.adoc[leveloffset=+3] - -include::modules/api-organization.adoc[leveloffset=+2] -include::modules/api-organization-createOrganization.adoc[leveloffset=+3] -include::modules/api-organization-validateProxyCacheConfig.adoc[leveloffset=+3] -include::modules/api-organization-getOrganizationCollaborators.adoc[leveloffset=+3] -include::modules/api-organization-getOrganizationApplication.adoc[leveloffset=+3] -include::modules/api-organization-updateOrganizationApplication.adoc[leveloffset=+3] -include::modules/api-organization-deleteOrganizationApplication.adoc[leveloffset=+3] -include::modules/api-organization-createOrganizationApplication.adoc[leveloffset=+3] -include::modules/api-organization-getOrganizationApplications.adoc[leveloffset=+3] -include::modules/api-organization-getProxyCacheConfig.adoc[leveloffset=+3] -include::modules/api-organization-deleteProxyCacheConfig.adoc[leveloffset=+3] -include::modules/api-organization-createProxyCacheConfig.adoc[leveloffset=+3] -include::modules/api-organization-getOrganizationMember.adoc[leveloffset=+3] -include::modules/api-organization-removeOrganizationMember.adoc[leveloffset=+3] -include::modules/api-organization-getOrganizationMembers.adoc[leveloffset=+3] -include::modules/api-organization-getOrganization.adoc[leveloffset=+3] -include::modules/api-organization-changeOrganizationDetails.adoc[leveloffset=+3] -include::modules/api-organization-deleteAdminedOrganization.adoc[leveloffset=+3] -include::modules/api-organization-getApplicationInformation.adoc[leveloffset=+3] - -include::modules/api-permission.adoc[leveloffset=+2] -include::modules/api-permission-getUserTransitivePermission.adoc[leveloffset=+3] -include::modules/api-permission-getUserPermissions.adoc[leveloffset=+3] -include::modules/api-permission-changeUserPermissions.adoc[leveloffset=+3] -include::modules/api-permission-deleteUserPermissions.adoc[leveloffset=+3] 
-include::modules/api-permission-getTeamPermissions.adoc[leveloffset=+3] -include::modules/api-permission-changeTeamPermissions.adoc[leveloffset=+3] -include::modules/api-permission-deleteTeamPermissions.adoc[leveloffset=+3] -include::modules/api-permission-listRepoTeamPermissions.adoc[leveloffset=+3] -include::modules/api-permission-listRepoUserPermissions.adoc[leveloffset=+3] - -include::modules/api-prototype.adoc[leveloffset=+2] -include::modules/api-prototype-updateOrganizationPrototypePermission.adoc[leveloffset=+3] -include::modules/api-prototype-deleteOrganizationPrototypePermission.adoc[leveloffset=+3] -include::modules/api-prototype-createOrganizationPrototypePermission.adoc[leveloffset=+3] -include::modules/api-prototype-getOrganizationPrototypePermissions.adoc[leveloffset=+3] - -include::modules/api-repository.adoc[leveloffset=+2] -include::modules/api-repository-createRepo.adoc[leveloffset=+3] -include::modules/api-repository-listRepos.adoc[leveloffset=+3] -include::modules/api-repository-changeRepoVisibility.adoc[leveloffset=+3] -include::modules/api-repository-changeRepoState.adoc[leveloffset=+3] -include::modules/api-repository-getRepo.adoc[leveloffset=+3] -include::modules/api-repository-updateRepo.adoc[leveloffset=+3] -include::modules/api-repository-deleteRepository.adoc[leveloffset=+3] - -include::modules/api-repositorynotification.adoc[leveloffset=+2] -include::modules/api-repositorynotification-testRepoNotification.adoc[leveloffset=+3] -include::modules/api-repositorynotification-getRepoNotification.adoc[leveloffset=+3] -include::modules/api-repositorynotification-deleteRepoNotification.adoc[leveloffset=+3] -include::modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc[leveloffset=+3] -include::modules/api-repositorynotification-createRepoNotification.adoc[leveloffset=+3] -include::modules/api-repositorynotification-listRepoNotifications.adoc[leveloffset=+3] - -include::modules/api-repotoken.adoc[leveloffset=+2] 
-include::modules/api-repotoken-getTokens.adoc[leveloffset=+3] -include::modules/api-repotoken-changeToken.adoc[leveloffset=+3] -include::modules/api-repotoken-deleteToken.adoc[leveloffset=+3] -include::modules/api-repotoken-createToken.adoc[leveloffset=+3] -include::modules/api-repotoken-listRepoTokens.adoc[leveloffset=+3] - -include::modules/api-robot.adoc[leveloffset=+2] -include::modules/api-robot-getUserRobots.adoc[leveloffset=+3] -include::modules/api-robot-getOrgRobotPermissions.adoc[leveloffset=+3] -include::modules/api-robot-regenerateOrgRobotToken.adoc[leveloffset=+3] -include::modules/api-robot-getUserRobotPermissions.adoc[leveloffset=+3] -include::modules/api-robot-regenerateUserRobotToken.adoc[leveloffset=+3] -include::modules/api-robot-getOrgRobot.adoc[leveloffset=+3] -include::modules/api-robot-createOrgRobot.adoc[leveloffset=+3] -include::modules/api-robot-deleteOrgRobot.adoc[leveloffset=+3] -include::modules/api-robot-getOrgRobots.adoc[leveloffset=+3] -include::modules/api-robot-getUserRobot.adoc[leveloffset=+3] -include::modules/api-robot-createUserRobot.adoc[leveloffset=+3] -include::modules/api-robot-deleteUserRobot.adoc[leveloffset=+3] - -include::modules/api-search.adoc[leveloffset=+2] -include::modules/api-search-conductRepoSearch.adoc[leveloffset=+3] -include::modules/api-search-conductSearch.adoc[leveloffset=+3] -include::modules/api-search-getMatchingEntities.adoc[leveloffset=+3] - -include::modules/api-secscan.adoc[leveloffset=+2] -include::modules/api-secscan-getRepoManifestSecurity.adoc[leveloffset=+3] - -include::modules/api-superuser.adoc[leveloffset=+2] -include::modules/api-superuser-createInstallUser.adoc[leveloffset=+3] -include::modules/api-superuser-listAllUsers.adoc[leveloffset=+3] -include::modules/api-superuser-listAllLogs.adoc[leveloffset=+3] -include::modules/api-superuser-createServiceKey.adoc[leveloffset=+3] -include::modules/api-superuser-listServiceKeys.adoc[leveloffset=+3] 
-include::modules/api-superuser-changeUserQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-deleteUserQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-createUserQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-listUserQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-changeOrganizationQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-createOrganizationQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-listOrganizationQuotaSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-changeOrganization.adoc[leveloffset=+3] -include::modules/api-superuser-deleteOrganization.adoc[leveloffset=+3] -include::modules/api-superuser-approveServiceKey.adoc[leveloffset=+3] -include::modules/api-superuser-getServiceKey.adoc[leveloffset=+3] -include::modules/api-superuser-updateServiceKey.adoc[leveloffset=+3] -include::modules/api-superuser-deleteServiceKey.adoc[leveloffset=+3] -include::modules/api-superuser-getRepoBuildStatusSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-getRepoBuildSuperUser.adoc[leveloffset=+3] -include::modules/api-superuser-getRepoBuildLogsSuperUser.adoc[leveloffset=+3] - -include::modules/api-tag.adoc[leveloffset=+2] -include::modules/api-tag-restoreTag.adoc[leveloffset=+3] -include::modules/api-tag-changeTag.adoc[leveloffset=+3] -include::modules/api-tag-deleteFullTag.adoc[leveloffset=+3] -include::modules/api-tag-listRepoTags.adoc[leveloffset=+3] - -include::modules/api-team.adoc[leveloffset=+2] -include::modules/api-team-getOrganizationTeamPermissions.adoc[leveloffset=+3] -include::modules/api-team-updateOrganizationTeamMember.adoc[leveloffset=+3] -include::modules/api-team-deleteOrganizationTeamMember.adoc[leveloffset=+3] -include::modules/api-team-getOrganizationTeamMembers.adoc[leveloffset=+3] 
-include::modules/api-team-inviteTeamMemberEmail.adoc[leveloffset=+3] -include::modules/api-team-deleteTeamMemberEmailInvite.adoc[leveloffset=+3] -include::modules/api-team-updateOrganizationTeam.adoc[leveloffset=+3] -include::modules/api-team-deleteOrganizationTeam.adoc[leveloffset=+3] - -include::modules/api-trigger.adoc[leveloffset=+2] -include::modules/api-trigger-activateBuildTrigger.adoc[leveloffset=+3] -include::modules/api-trigger-listTriggerRecentBuilds.adoc[leveloffset=+3] -include::modules/api-trigger-manuallyStartBuildTrigger.adoc[leveloffset=+3] -include::modules/api-trigger-getBuildTrigger.adoc[leveloffset=+3] -include::modules/api-trigger-updateBuildTrigger.adoc[leveloffset=+3] -include::modules/api-trigger-deleteBuildTrigger.adoc[leveloffset=+3] -include::modules/api-trigger-listBuildTriggers.adoc[leveloffset=+3] - -include::modules/api-user.adoc[leveloffset=+2] -include::modules/api-user-createStar.adoc[leveloffset=+3] -include::modules/api-user-listStarredRepos.adoc[leveloffset=+3] -include::modules/api-user-getLoggedInUser.adoc[leveloffset=+3] -include::modules/api-user-deleteStar.adoc[leveloffset=+3] -include::modules/api-user-getUserInformation.adoc[leveloffset=+3] - -include::modules/api-definitions.adoc[leveloffset=+2] - -// do not remove -[id="api-config-examples"] -== API configuration examples - -include::modules/external-registry-config-api-example.adoc[leveloffset=+2] -include::modules/root-rule-config-api-example.adoc[leveloffset=+2] += {productname} API guide +:context: use-api + +The {productname} application programming interface (API) provides a comprehensive, RESTful interface for managing and automating tasks within {productname}. Designed around the link:https://oauth.net/2/[_OAuth 2.0 protocol_], this API enables secure, fine-grained access to {productname} resources, and allows administrators and users to perform such actions as creating repositories, managing images, setting permissions, and more. 
+
+{productname} follows Semantic Versioning (SemVer) principles, ensuring predictable API stability across releases, such as:
+
+* *Major releases*: Introduce new capabilities. Might include breaking changes to API compatibility. For example, the API of {productname} _2.0_ differs from {productname} _3.0_.
+
+* *Minor releases*: Add new functionality in a backward-compatible manner. For example, a _3.y_ release adds functionality to the version _3_ release.
+
+* *Patch releases*: Deliver bug fixes and improvements while preserving backward compatibility with minor releases, such as _3.y.z_.
+
+The following guide describes the {productname} API in more detail and covers the following topics:
+
+* OAuth 2 access tokens and how they compare to traditional API tokens and {productname}'s robot tokens
+* Generating an OAuth 2 access token
+* Best practices for token management
+* OAuth 2 access token capabilities
+* Using the {productname} API
+* {productname} API configuration examples
+
+This guide is accompanied by a second guide, link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/red_hat_quay_api_reference/index[{productname} API reference], that provides information about all `api/v1` endpoints and how to access those endpoints with example commands.
+ +//overview +include::modules/token-overview.adoc[leveloffset=+1] + +//creating oauth 2 access token +include::modules/oauth2-access-tokens.adoc[leveloffset=+1] +include::modules/creating-oauth-access-token.adoc[leveloffset=+2] +include::modules/reassigning-oauth-access-token.adoc[leveloffset=+2] +include::modules/deleting-oauth-access-token.adoc[leveloffset=+2] + +//robot account tokens +include::modules/robot-account-tokens.adoc[leveloffset=+1] +include::modules/regenerating-robot-account-token-ui.adoc[leveloffset=+2] +include::modules/regenerating-robot-account-token-api.adoc[leveloffset=+2] + +//oci referrers +include::modules/oci-referrers-oauth-access-token.adoc[leveloffset=+1] +include::modules/creating-v2-oauth-access-token.adoc[leveloffset=+2] + +//how to use the API +include::modules/enabling-using-the-api.adoc[leveloffset=+1] +include::modules/configuring-api-calls.adoc[leveloffset=+2] +include::modules/using-the-api.adoc[leveloffset=+2] +include::modules/accessing-swagger-ui.adoc[leveloffset=+2] +include::modules/automating-quay-using-the-api.adoc[leveloffset=+2] + + +//API examples +include::modules/quay-api-examples.adoc[leveloffset=+1] +//application +include::modules/creating-oauth-application-api.adoc[leveloffset=+2] +//discovery +include::modules/discovering-quay-api-endpoints.adoc[leveloffset=+2] +//error +include::modules/quay-error-details.adoc[leveloffset=+2] +//global message +include::modules/api-global-messages.adoc[leveloffset=+2] +//viewing logs (aggregate) +include::modules/viewing-usage-logs-api.adoc[leveloffset=+2] +//exporting logs +include::modules/use-quay-export-logs-api.adoc[leveloffset=+2] +//manifest label +include::modules/adding-managing-labels-api.adoc[leveloffset=+2] +//mirror +include::modules/mirror-quay-api.adoc[leveloffset=+2] +//quota +include::modules/quota-management-api.adoc[leveloffset=+2] +//quota (organization) +include::modules/quota-organization-management-api.adoc[leveloffset=+3] +// quota limits organization 
+include::modules/quota-limit-api.adoc[leveloffset=+3] +//quota (user limits and policies) +include::modules/quota-limit-user-api.adoc[leveloffset=+3] +//organization +include::modules/organization-management-api.adoc[leveloffset=+2] +//org creation +include::modules/org-create-api.adoc[leveloffset=+3] +include::modules/org-delete-api.adoc[leveloffset=+3] +//member management +include::modules/org-team-member-api.adoc[leveloffset=+3] +//application +include::modules/org-application-create-api.adoc[leveloffset=+3] +//proxy-cache +include::modules/org-proxy-cache-configuration-api.adoc[leveloffset=+3] +//permission +include::modules/repo-permission-api.adoc[leveloffset=+2] +include::modules/user-permissions-repo.adoc[leveloffset=+3] +include::modules/team-permissions-api.adoc[leveloffset=+3] +//policy +include::modules/repo-policy-api.adoc[leveloffset=+2] +include::modules/creating-org-policy-api.adoc[leveloffset=+3] +include::modules/creating-policy-api-current-user.adoc[leveloffset=+3] +include::modules/creating-repository-policy-api.adoc[leveloffset=+3] +include::modules/creating-policy-api-other-user.adoc[leveloffset=+3] +//repo +include::modules/repo-manage-api.adoc[leveloffset=+2] +include::modules/repo-creation-management.adoc[leveloffset=+3] +include::modules/creating-notifications-api.adoc[leveloffset=+3] +//robot account +include::modules/robot-account-manage-api.adoc[leveloffset=+2] +include::modules/creating-robot-account-api.adoc[leveloffset=+3] +include::modules/robot-account-permissions-api.adoc[leveloffset=+3] +include::modules/deleting-robot-account-api.adoc[leveloffset=+3] +//search +include::modules/search-api.adoc[leveloffset=+2] +//sec-scan +include::modules/security-scanning-api.adoc[leveloffset=+2] +//superuser +include::modules/superuser-manage-api.adoc[leveloffset=+2] +include::modules/creating-user-account-quay-api.adoc[leveloffset=+3] +include::modules/deleting-user-cli-api.adoc[leveloffset=+3] 
+include::modules/managing-organization-superuser-api.adoc[leveloffset=+3] +include::modules/listing-repos-superuser-api.adoc[leveloffset=+3] +include::modules/managing-organization-quota-superuser-api.adoc[leveloffset=+3] +include::modules/managing-user-quota-superuser-api.adoc[leveloffset=+3] +include::modules/retrieving-build-info-superuser-api.adoc[leveloffset=+3] +include::modules/managing-service-keys-api.adoc[leveloffset=+3] +//tag +include::modules/managing-tags-api.adoc[leveloffset=+2] +// team member management +include::modules/managing-teams-api.adoc[leveloffset=+2] +include::modules/managing-team-members-api.adoc[leveloffset=+3] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+3] +include::modules/deleting-team-within-organization-api.adoc[leveloffset=+3] +//build +include::modules/managing-builds-api.adoc[leveloffset=+2] +//user +include::modules/managing-user-options-api.adoc[leveloffset=+2] \ No newline at end of file diff --git a/api_reference/docinfo.xml b/api_reference/docinfo.xml new file mode 100644 index 000000000..267e8ebfc --- /dev/null +++ b/api_reference/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +{productname} API reference + + {productname} API reference + + + Red Hat OpenShift Documentation Team + + diff --git a/api_reference/master.adoc b/api_reference/master.adoc new file mode 100644 index 000000000..5d35387f3 --- /dev/null +++ b/api_reference/master.adoc @@ -0,0 +1,280 @@ +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] +[id="api-reference"] += {productname} API reference +:context: use-api + +The {productname} application programming interface (API) provides a comprehensive, RESTful interface for managing and automating tasks within {productname}. 
Designed around the link:https://oauth.net/2/[_OAuth 2.0 protocol_], this API enables secure, fine-grained access to {productname} resources, and allows administrators and users to perform such actions as creating repositories, managing images, setting permissions, and more. +
+{productname} follows Semantic Versioning (SemVer) principles, ensuring predictable API stability across releases, such as:
+
+* *Major releases*: Introduce new capabilities. Might include breaking changes to API compatibility. For example, the API of {productname} _2.0_ differs from {productname} _3.0_.
+
+* *Minor releases*: Add new functionality in a backward-compatible manner. For example, a _3.y_ release adds functionality to the version _3_ release.
+
+* *Patch releases*: Deliver bug fixes and improvements while preserving backward compatibility with minor releases, such as _3.y.z_.
+
+The following guide describes the {productname} API in more detail and covers the following topics:
+
+* API endpoint structure, including supported HTTP methods
+* Request and response schemas for each endpoint
+* Required and optional parameters
+* Authentication and authorization requirements
+* Common error codes and troubleshooting information
+
+For a more guided approach, including token overview, management strategies, understanding API endpoints, and more, refer to the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/red_hat_quay_api_guide/index[{productname} API guide].
+ +include::modules/api-authorization.adoc[leveloffset=+1] + +//example procedures provided +include::modules/api-appspecifictokens.adoc[leveloffset=+1] +include::modules/api-appspecifictokens-createAppToken.adoc[leveloffset=+2] +include::modules/api-appspecifictokens-listAppTokens.adoc[leveloffset=+2] +include::modules/api-appspecifictokens-getAppToken.adoc[leveloffset=+2] +include::modules/api-appspecifictokens-revokeAppToken.adoc[leveloffset=+2] + +include::modules/api-build.adoc[leveloffset=+1] +include::modules/api-build-getRepoBuildStatus.adoc[leveloffset=+2] +include::modules/api-build-getRepoBuildLogs.adoc[leveloffset=+2] +include::modules/api-build-getRepoBuild.adoc[leveloffset=+2] +include::modules/api-build-cancelRepoBuild.adoc[leveloffset=+2] +include::modules/api-build-requestRepoBuild.adoc[leveloffset=+2] +include::modules/api-build-getRepoBuilds.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-discovery.adoc[leveloffset=+1] +include::modules/api-discovery-discovery.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-error.adoc[leveloffset=+1] +include::modules/api-error-getErrorDescription.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-globalmessages.adoc[leveloffset=+1] +include::modules/api-globalmessages-createGlobalMessage.adoc[leveloffset=+2] +include::modules/api-globalmessages-getGlobalMessages.adoc[leveloffset=+2] +include::modules/api-globalmessages-deleteGlobalMessage.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-logs.adoc[leveloffset=+1] +include::modules/api-logs-getAggregateUserLogs.adoc[leveloffset=+2] +include::modules/api-logs-exportUserLogs.adoc[leveloffset=+2] +include::modules/api-logs-listUserLogs.adoc[leveloffset=+2] +include::modules/api-logs-getAggregateOrgLogs.adoc[leveloffset=+2] +include::modules/api-logs-exportOrgLogs.adoc[leveloffset=+2] +include::modules/api-logs-listOrgLogs.adoc[leveloffset=+2] 
+include::modules/api-logs-getAggregateRepoLogs.adoc[leveloffset=+2] +include::modules/api-logs-exportRepoLogs.adoc[leveloffset=+2] +include::modules/api-logs-listRepoLogs.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-manifest.adoc[leveloffset=+1] +include::modules/api-manifest-getManifestLabel.adoc[leveloffset=+2] +include::modules/api-manifest-deleteManifestLabel.adoc[leveloffset=+2] +include::modules/api-manifest-addManifestLabel.adoc[leveloffset=+2] +include::modules/api-manifest-listManifestLabels.adoc[leveloffset=+2] +include::modules/api-manifest-getRepoManifest.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-mirror.adoc[leveloffset=+1] +include::modules/api-mirror-syncCancel.adoc[leveloffset=+2] +include::modules/api-mirror-syncNow.adoc[leveloffset=+2] +include::modules/api-mirror-getRepoMirrorConfig.adoc[leveloffset=+2] +include::modules/api-mirror-changeRepoMirrorConfig.adoc[leveloffset=+2] +include::modules/api-mirror-createRepoMirrorConfig.adoc[leveloffset=+2] + +//commands provided +include::modules/api-namespacequota.adoc[leveloffset=+1] +include::modules/api-namespacequota-listUserQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-getOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-changeOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-createOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-listOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-getUserQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-listUserQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-getOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-changeOrganizationQuota.adoc[leveloffset=+2] 
+include::modules/api-namespacequota-deleteOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-createOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-listOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-getUserQuota.adoc[leveloffset=+2] + +//done +include::modules/api-organization.adoc[leveloffset=+1] +include::modules/api-organization-createOrganization.adoc[leveloffset=+2] +include::modules/api-organization-validateProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationCollaborators.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-updateOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-deleteOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-createOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationApplications.adoc[leveloffset=+2] +include::modules/api-organization-getProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-deleteProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-createProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationMember.adoc[leveloffset=+2] +include::modules/api-organization-removeOrganizationMember.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationMembers.adoc[leveloffset=+2] +include::modules/api-organization-getOrganization.adoc[leveloffset=+2] +include::modules/api-organization-changeOrganizationDetails.adoc[leveloffset=+2] +include::modules/api-organization-deleteAdminedOrganization.adoc[leveloffset=+2] +include::modules/api-organization-getApplicationInformation.adoc[leveloffset=+2] +//done +include::modules/api-permission.adoc[leveloffset=+1] +include::modules/api-permission-getUserTransitivePermission.adoc[leveloffset=+2] 
+include::modules/api-permission-getUserPermissions.adoc[leveloffset=+2] +include::modules/api-permission-changeUserPermissions.adoc[leveloffset=+2] +include::modules/api-permission-deleteUserPermissions.adoc[leveloffset=+2] +include::modules/api-permission-getTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-changeTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-deleteTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-listRepoTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-listRepoUserPermissions.adoc[leveloffset=+2] +//done but might need example procs +include::modules/api-policy.adoc[leveloffset=+1] +include::modules/api-policy-createOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-listOrganizationAutoPrunePolicies.adoc[leveloffset=+2] +include::modules/api-policy-getOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-updateOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-createRepositoryAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-listRepositoryAutoPrunePolicies.adoc[leveloffset=+2] +include::modules/api-policy-getRepositoryAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-updateRepositoryAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-createUserAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-listUserAutoPrunePolicies.adoc[leveloffset=+2] +include::modules/api-policy-getUserAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-deleteUserAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-updateUserAutoPrunePolicy.adoc[leveloffset=+2] + +//done +include::modules/api-prototype.adoc[leveloffset=+1] 
+include::modules/api-prototype-updateOrganizationPrototypePermission.adoc[leveloffset=+2] +include::modules/api-prototype-deleteOrganizationPrototypePermission.adoc[leveloffset=+2] +include::modules/api-prototype-createOrganizationPrototypePermission.adoc[leveloffset=+2] +include::modules/api-prototype-getOrganizationPrototypePermissions.adoc[leveloffset=+2] +//won't do +include::modules/api-referrers.adoc[leveloffset=+1] +include::modules/api-referrers-getReferrers.adoc[leveloffset=+2] + +//done +include::modules/api-repository.adoc[leveloffset=+1] +//do not edit +include::modules/api-repository-createRepo.adoc[leveloffset=+2] +include::modules/api-repository-listRepos.adoc[leveloffset=+2] +include::modules/api-repository-changeRepoVisibility.adoc[leveloffset=+2] +include::modules/api-repository-changeRepoState.adoc[leveloffset=+2] +include::modules/api-repository-getRepo.adoc[leveloffset=+2] +include::modules/api-repository-updateRepo.adoc[leveloffset=+2] +include::modules/api-repository-deleteRepository.adoc[leveloffset=+2] + +//done +include::modules/api-repositorynotification.adoc[leveloffset=+1] +include::modules/api-repositorynotification-testRepoNotification.adoc[leveloffset=+2] +include::modules/api-repositorynotification-getRepoNotification.adoc[leveloffset=+2] +include::modules/api-repositorynotification-deleteRepoNotification.adoc[leveloffset=+2] +include::modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc[leveloffset=+2] +include::modules/api-repositorynotification-createRepoNotification.adoc[leveloffset=+2] +include::modules/api-repositorynotification-listRepoNotifications.adoc[leveloffset=+2] +//done +include::modules/api-robot.adoc[leveloffset=+1] +include::modules/api-robot-getUserRobots.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobotPermissions.adoc[leveloffset=+2] +include::modules/api-robot-regenerateOrgRobotToken.adoc[leveloffset=+2] +include::modules/api-robot-getUserRobotPermissions.adoc[leveloffset=+2] 
+include::modules/api-robot-regenerateUserRobotToken.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobot.adoc[leveloffset=+2] +include::modules/api-robot-createOrgRobot.adoc[leveloffset=+2] +include::modules/api-robot-deleteOrgRobot.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobots.adoc[leveloffset=+2] +include::modules/api-robot-getUserRobot.adoc[leveloffset=+2] +include::modules/api-robot-createUserRobot.adoc[leveloffset=+2] +include::modules/api-robot-deleteUserRobot.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobotFederation.adoc[leveloffset=+2] +include::modules/api-robot-createOrgRobotFederation.adoc[leveloffset=+2] +//include::modules/api-robot-deleteOrgRobotFederation.adoc[leveloffset=+2] + +//done +include::modules/api-search.adoc[leveloffset=+1] +include::modules/api-search-conductRepoSearch.adoc[leveloffset=+2] +include::modules/api-search-conductSearch.adoc[leveloffset=+2] +include::modules/api-search-getMatchingEntities.adoc[leveloffset=+2] + +//done +include::modules/api-secscan.adoc[leveloffset=+1] +include::modules/api-secscan-getRepoManifestSecurity.adoc[leveloffset=+2] +//done +include::modules/api-superuser.adoc[leveloffset=+1] +include::modules/api-superuser-createInstallUser.adoc[leveloffset=+2] +include::modules/api-superuser-deleteInstallUser.adoc[leveloffset=+2] +include::modules/api-superuser-listAllUsers.adoc[leveloffset=+2] +include::modules/api-superuser-listAllLogs.adoc[leveloffset=+2] +include::modules/api-superuser-listAllOrganizations.adoc[leveloffset=+2] +include::modules/api-superuser-createServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-listServiceKeys.adoc[leveloffset=+2] +include::modules/api-superuser-changeUserQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-deleteUserQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-createUserQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-listUserQuotaSuperUser.adoc[leveloffset=+2] 
+include::modules/api-superuser-changeOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-createOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-listOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-changeOrganization.adoc[leveloffset=+2] +include::modules/api-superuser-deleteOrganization.adoc[leveloffset=+2] +include::modules/api-superuser-approveServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-deleteServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-updateServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-getServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-getRepoBuildStatusSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-getRepoBuildSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-getRepoBuildLogsSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-getRegistrySize.adoc[leveloffset=+2] +include::modules/api-superuser-postRegistrySize.adoc[leveloffset=+2] + +//done +include::modules/api-tag.adoc[leveloffset=+1] +include::modules/api-tag-restoreTag.adoc[leveloffset=+2] +include::modules/api-tag-changeTag.adoc[leveloffset=+2] +include::modules/api-tag-deleteFullTag.adoc[leveloffset=+2] +include::modules/api-tag-listRepoTags.adoc[leveloffset=+2] +//done +include::modules/api-team.adoc[leveloffset=+1] +include::modules/api-team-getOrganizationTeamPermissions.adoc[leveloffset=+2] +include::modules/api-team-updateOrganizationTeamMember.adoc[leveloffset=+2] +include::modules/api-team-deleteOrganizationTeamMember.adoc[leveloffset=+2] +include::modules/api-team-getOrganizationTeamMembers.adoc[leveloffset=+2] +include::modules/api-team-inviteTeamMemberEmail.adoc[leveloffset=+2] +include::modules/api-team-deleteTeamMemberEmailInvite.adoc[leveloffset=+2] 
+include::modules/api-team-updateOrganizationTeam.adoc[leveloffset=+2] +include::modules/api-team-deleteOrganizationTeam.adoc[leveloffset=+2] +//done +include::modules/api-trigger.adoc[leveloffset=+1] +include::modules/api-trigger-activateBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-listTriggerRecentBuilds.adoc[leveloffset=+2] +include::modules/api-trigger-manuallyStartBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-getBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-updateBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-deleteBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-listBuildTriggers.adoc[leveloffset=+2] + +//done +include::modules/api-user.adoc[leveloffset=+1] +include::modules/api-user-createStar.adoc[leveloffset=+2] +include::modules/api-user-listStarredRepos.adoc[leveloffset=+2] +include::modules/api-user-getLoggedInUser.adoc[leveloffset=+2] +include::modules/api-user-deleteStar.adoc[leveloffset=+2] +include::modules/api-user-getUserInformation.adoc[leveloffset=+2] + +include::modules/api-definitions.adoc[leveloffset=+1] + +//// +// do not remove +[id="api-config-examples"] +== API configuration examples + +include::modules/external-registry-config-api-example.adoc[leveloffset=+2] +include::modules/root-rule-config-api-example.adoc[leveloffset=+2] +//// \ No newline at end of file diff --git a/api_reference/modules b/api_reference/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/api_reference/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/architecture/master.adoc b/architecture/master.adoc index f777281a2..946267ed2 100644 --- a/architecture/master.adoc +++ b/architecture/master.adoc @@ -7,6 +7,7 @@ include::modules/arch-intro.adoc[leveloffset=+1] include::modules/arch-intro-scalability.adoc[leveloffset=+2] include::modules/arch-intro-content-distribution.adoc[leveloffset=+2] 
include::modules/arch-intro-build-automation.adoc[leveloffset=+2] +include::modules/build-enhanced-arch.adoc[leveloffset=+2] include::modules/arch-intro-integration.adoc[leveloffset=+2] include::modules/arch-intro-security.adoc[leveloffset=+2] include::modules/arch-intro-recent-features.adoc[leveloffset=+2] @@ -50,6 +51,7 @@ include::modules/mirroring-events.adoc[leveloffset=+3] include::modules/mirroring-api-intro.adoc[leveloffset=+3] //geo-repl include::modules/georepl-intro.adoc[leveloffset=+2] +include::modules/arch-georpl-features.adoc[leveloffset=+3] include::modules/georepl-prereqs.adoc[leveloffset=+3] include::modules/georepl-arch-standalone.adoc[leveloffset=+3] include::modules/georepl-arch-operator.adoc[leveloffset=+3] @@ -63,3 +65,9 @@ include::modules/sizing-intro.adoc[leveloffset=+1] include::modules/sizing-sample.adoc[leveloffset=+2] include::modules/subscription-intro.adoc[leveloffset=+2] include::modules/quay-internal-registry-intro.adoc[leveloffset=+2] + +// Quota management +include::modules/quota-management-arch.adoc[leveloffset=+1] + +//Namespace auto-pruning +include::modules/namespace-auto-pruning-arch.adoc[leveloffset=+1] diff --git a/build_docs b/build_docs index ebed327a7..e67ad8784 100755 --- a/build_docs +++ b/build_docs @@ -6,7 +6,7 @@ asciidoctor -a productname="Project Quay" -a toc="left" -d book release_notes/ma asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay/master.adoc -D dist -o deploy_quay.html asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay_ha/master.adoc -D dist -o deploy_quay_ha.html -asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay_on_openshift_op_tng/master.adoc -D dist -o deploy_quay_on_openshift_op_tng.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_red_hat_quay_operator/master.adoc -D dist -o deploy_red_hat_quay_operator.html asciidoctor -a productname="Project Quay" -a toc="left" -d book config_quay/master.adoc 
-D dist -o config_quay.html asciidoctor -a productname="Project Quay" -a toc="left" -d book manage_quay/master.adoc -D dist -o manage_quay.html @@ -15,6 +15,8 @@ asciidoctor -a productname="Project Quay" -a toc="left" -d book upgrade_quay/mas asciidoctor -a productname="Project Quay" -a toc="left" -d book use_quay/master.adoc -D dist -o use_quay.html asciidoctor -a productname="Project Quay" -a toc="left" -d book api/master.adoc -D dist -o api_quay.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book quay_io/master.adoc -D dist -o quay_io.html + asciidoctor -a productname="Project Quay" -a toc="left" -d book build_quay/master.adoc -D dist -o build_quay.html cp -a images dist/images diff --git a/builders-virtual b/builders-virtual deleted file mode 100644 index 0383c2a99..000000000 --- a/builders-virtual +++ /dev/null @@ -1,94 +0,0 @@ -[[red-hat-quay-quota-builders-establishment]] -= Setting up a {productname} builders environment - -The following procedure describes how you can implement the builders feature in {productname}. - -.Prerequisites - -* Builders require SSL certificates. For more information link:https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html/manage_red_hat_quay/adding-tls-certificates-to-the-quay-enterprise-container[Adding TLS certificates to the {productname} container]. - - -.Procedure - -[NOTE] -==== -* This procedure assumes you already have a cluster provisioned. -* This procedure is for setting up virtual builders. -==== - -. Log in to your {productname} cluster using your specified username and password: -+ ----- -$ oc login -u $KUBE_USER -p $KUBE_PASSWORD $KUBE_API ----- - -. Create a new namespace for your virtual builders: -+ ----- -$ oc create ns virtual-builders ----- - -. Create a new project for your virtual builders: -+ ----- -$ oc project virtual-builders ----- - -. Create a service account for your virtual builder: -+ ----- -$ oc create sa quay-builder ----- - -. 
Provide the created service account with editing permissions so that it can run the build: -+ ----- -$ oc adm policy add-role-to-user edit system:serviceaccount:virtual-builders:quay-builder ----- - -. Grant the Quay builder `anyuid scc` permissions: -+ ----- -$ oc adm polcy add-scc-to-user anyuid -z quay-builder ----- -+ -[NOTE] -==== -This action requires cluster admin privileges and is required because for unprivileged or rootless builds to work, they must run as the Podman user. -==== - -. Obtain token for the Quay builder service account: -+ ----- -$ export VIRTUAL_SA_TOKEN=$(oc sa get-token quay-builder -n virtual-builders) \ -yq -i e '.BUILD_MANAGER[1].EXECUTORS[0].SERVICE_ACCOUNT_TOKEN = strenv(VIRTUAL_SA_TOKEN)' ./quay-configs/quay-config.build.virtual.yaml ----- -+ -[NOTE] -+ -==== -Using the `yq` yaml parser extracts the token to the specified Quay configuration file. -==== - -. Install the Quay Operator with a subscription: -+ ----- -$ oc apply -f ./k8s-objects/quay-operator-subscription.yaml -n openshift-opertors ----- -+ -[NOTE] -==== -This step can also be accomplished on the Quay UI. -==== - -. Create a namespace for the Quay deployment: -+ ----- -$ oc create ns quay ----- - -. Set the project context: -+ ----- -$ oc project quay ----- diff --git a/builders_and_image_automation/docinfo.xml b/builders_and_image_automation/docinfo.xml new file mode 100644 index 000000000..e8b3dc5ef --- /dev/null +++ b/builders_and_image_automation/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Builders and image automation + + Understand builders and their role in automating image builds. 
+ + + Red Hat OpenShift Documentation Team + + diff --git a/builders_and_image_automation/master.adoc b/builders_and_image_automation/master.adoc new file mode 100644 index 000000000..0fab3f64f --- /dev/null +++ b/builders_and_image_automation/master.adoc @@ -0,0 +1,43 @@ +include::modules/attributes.adoc[] + +:_content-type: ASSEMBLY +[id="quay-builders-image-automation"] += Builders and image automation +:context: quay-builders-image-automation + +The following guide shows you how to configure the {productname} _builds_ feature on both bare metal and virtual machines. + +include::modules/builds-overview.adoc[leveloffset=+1] +include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+2] +include::modules/configuring-openshift-tls-component-builds.adoc[leveloffset=+1] + +//bare metal builders +include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] +include::modules/prepare-ocp-for-bare-metal-builds.adoc[leveloffset=+2] +ifdef::upstream[] +include::modules/setting-up-builds-aws.adoc[leveloffset=+3] +endif::upstream[] +include::modules/openshift-routes-limitations.adoc[leveloffset=+3] + +// Virtual builders +include::modules/build-enhancements.adoc[leveloffset=+1] +include::modules/builders-virtual-environment.adoc[leveloffset=+2] +include::modules/red-hat-quay-s3-bucket-modify.adoc[leveloffset=+3] +include::modules/red-hat-quay-gcp-bucket-modify.adoc[leveloffset=+3] + +//Starting a new build +include::modules/starting-a-build.adoc[leveloffset=+1] + +//Build triggers with UI +include::modules/build-trigger-overview.adoc[leveloffset=+1] +include::modules/red-hat-quay-builders-ui.adoc[leveloffset=+2] +include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+3] +include::modules/understanding-tag-naming-build-triggers.adoc[leveloffset=+3] +include::modules/skipping-source-control-triggered-build.adoc[leveloffset=+3] +include::modules/manually-triggering-a-build-trigger.adoc[leveloffset=+2] + +// Github OAUTH 
+include::modules/proc_github-app.adoc[leveloffset=+1] + +//Troubleshooting +include::modules/troubleshooting-builds.adoc[leveloffset=+1] \ No newline at end of file diff --git a/builders_and_image_automation/modules b/builders_and_image_automation/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/builders_and_image_automation/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/clair/docinfo.xml b/clair/docinfo.xml index 457153dd4..3cacdace5 100644 --- a/clair/docinfo.xml +++ b/clair/docinfo.xml @@ -2,7 +2,7 @@ {producty} Vulnerability reporting with Clair on {productname} - Get started with {productname} + Getting started with Clair Red Hat OpenShift Documentation Team diff --git a/clair/master.adoc b/clair/master.adoc index d3751de3f..de1bdb6ce 100644 --- a/clair/master.adoc +++ b/clair/master.adoc @@ -1,9 +1,8 @@ :_content-type: ASSEMBLY - include::modules/attributes.adoc[] - [id="vulnerability-reporting-clair-quay"] = Vulnerability reporting with Clair on {productname} +:context: clair The contents within this guide provide an overview of Clair for {productname}, running Clair on standalone {productname} and Operator deployments, and advanced Clair configuration. @@ -13,14 +12,16 @@ The contents within this guide provide an overview of Clair for {productname}, r The content in this guide explains the key purposes and concepts of Clair on {productname}. It also contains information about Clair releases and the location of official Clair containers. 
include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] -include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] +include::modules/about-clair.adoc[leveloffset=+2] +include::modules/clair-severity-mapping.adoc[leveloffset=+2] +//include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] include::modules/clair-concepts.adoc[leveloffset=+1] // include::modules/internal-api.adoc[leveloffset=+2] include::modules/clair-authentication.adoc[leveloffset=+2] //include::modules/testing-clair.adoc[leveloffset=+1] include::modules/clair-updaters.adoc[leveloffset=+2] -include::modules/clair-updater-urls.adoc[leveloffset=+3] -include::modules/about-clair.adoc[leveloffset=+1] +include::modules/clair-updater-urls.adoc[leveloffset=+2] +include::modules/configuring-clair-updaters.adoc[leveloffset=+2] include::modules/clair-cve.adoc[leveloffset=+2] include::modules/fips-overview.adoc[leveloffset=+2] @@ -30,6 +31,8 @@ include::modules/fips-overview.adoc[leveloffset=+2] This guide contains procedures for running Clair on {productname} in both standalone and {ocp} Operator deployments. 
include::modules/clair-standalone-configure.adoc[leveloffset=+1] +include::modules/clair-postgresql-database-update.adoc[leveloffset=+2] +include::modules/clair-standalone-upgrade.adoc[leveloffset=+2] include::modules/clair-openshift.adoc[leveloffset=+1] // include::modules/clair-openshift-manual.adoc[leveloffset=+2] @@ -66,7 +69,7 @@ include::modules/clair-export-bundle-standalone.adoc[leveloffset=+3] include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+3] include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+3] -include::modules/clair-crda-configuration.adoc[leveloffset=+2] +//include::modules/clair-crda-configuration.adoc[leveloffset=+2] include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+2] diff --git a/config_quay/master.adoc b/config_quay/master.adoc index a6f9e0faf..bb454fc26 100644 --- a/config_quay/master.adoc +++ b/config_quay/master.adoc @@ -6,9 +6,16 @@ include::modules/attributes.adoc[] = Configure {productname} include::modules/config-intro.adoc[leveloffset=+1] -include::modules/config-updates-38.adoc[leveloffset=+2] -include::modules/config-updates-37.adoc[leveloffset=+2] -include::modules/config-updates-36.adoc[leveloffset=+2] +include::modules/config-disclaimer.adoc[leveloffset=+1] +include::modules/config-updates-314.adoc[leveloffset=+2] +//include::modules/config-updates-313.adoc[leveloffset=+2] +//include::modules/config-updates-312.adoc[leveloffset=+2] +//include::modules/config-updates-311.adoc[leveloffset=+2] +//include::modules/config-updates-310.adoc[leveloffset=+2] +//include::modules/config-updates-39.adoc[leveloffset=+2] +//include::modules/config-updates-38.adoc[leveloffset=+2] +//include::modules/config-updates-37.adoc[leveloffset=+2] +//include::modules/config-updates-36.adoc[leveloffset=+2] include::modules/config-file-intro.adoc[leveloffset=+2] include::modules/config-file-location.adoc[leveloffset=+2] 
include::modules/config-file-minimal.adoc[leveloffset=+2] @@ -30,6 +37,11 @@ include::modules/config-fields-storage-aws.adoc[leveloffset=+3] include::modules/config-fields-storage-gcp.adoc[leveloffset=+3] include::modules/config-fields-storage-azure.adoc[leveloffset=+3] include::modules/config-fields-storage-swift.adoc[leveloffset=+3] +include::modules/config-fields-nutanix.adoc[leveloffset=+3] +include::modules/config-fields-ibmcloudstorage.adoc[leveloffset=+3] +include::modules/config-fields-netapp-ontap-s3.adoc[leveloffset=+3] +include::modules/config-fields-hcp.adoc[leveloffset=+3] + include::modules/config-fields-redis.adoc[leveloffset=+2] include::modules/config-fields-modelcache.adoc[leveloffset=+2] @@ -37,7 +49,9 @@ include::modules/config-fields-modelcache-memcache.adoc[leveloffset=+3] include::modules/config-fields-modelcache-single-redis.adoc[leveloffset=+3] include::modules/config-fields-modelcache-clustered-redis.adoc[leveloffset=+3] include::modules/config-fields-tag-expiration.adoc[leveloffset=+2] - +include::modules/config-fields-quota-management.adoc[leveloffset=+2] +include::modules/config-fields-proxy-cache.adoc[leveloffset=+2] +include::modules/config-fields-robot-account.adoc[leveloffset=+2] include::modules/config-preconfigure-automation.adoc[leveloffset=+2] include::modules/deploying-the-operator-using-initial-configuration.adoc[leveloffset=+2] @@ -53,12 +67,17 @@ include::modules/config-fields-ldap.adoc[leveloffset=+2] include::modules/config-fields-mirroring.adoc[leveloffset=+2] include::modules/config-fields-scanner.adoc[leveloffset=+2] include::modules/config-fields-helm-oci.adoc[leveloffset=+2] +include::modules/other-oci-artifacts-with-quay.adoc[leveloffset=+2] +include::modules/config-fields-modelcard-rendering.adoc[leveloffset=+2] +//include::modules/unknown-artifacts.adoc[leveloffset=+2] include::modules/config-fields-actionlog.adoc[leveloffset=+2] include::modules/config-fields-build-logs.adoc[leveloffset=+2] 
include::modules/config-fields-dockerfile-build.adoc[leveloffset=+2] +include::modules/config-fields-build-manager.adoc[leveloffset=+2] include::modules/config-fields-oauth.adoc[leveloffset=+2] +include::modules/oidc-config-fields.adoc[leveloffset=+2] include::modules/config-fields-nested-repositories.adoc[leveloffset=+2] -include::modules/other-oci-artifacts-with-quay.adoc[leveloffset=+2] +include::modules/ref_quay-integration-config-fields.adoc[leveloffset=+2] include::modules/config-fields-mail.adoc[leveloffset=+2] include::modules/config-fields-user.adoc[leveloffset=+2] include::modules/config-fields-recaptcha.adoc[leveloffset=+2] @@ -69,28 +88,17 @@ include::modules/config-fields-misc.adoc[leveloffset=+2] include::modules/config-fields-legacy.adoc[leveloffset=+2] include::modules/config-fields-v2-ui.adoc[leveloffset=+2] include::modules/config-fields-ipv6.adoc[leveloffset=+2] +include::modules/config-fields-branding.adoc[leveloffset=+2] +include::modules/config-fields-footer.adoc[leveloffset=+2] +include::modules/config-fields-session-logout.adoc[leveloffset=+2] include::modules/config-envvar-intro.adoc[leveloffset=+1] include::modules/config-envvar-georepl.adoc[leveloffset=+2] include::modules/config-envvar-dbpool.adoc[leveloffset=+2] include::modules/config-envvar-worker-connection.adoc[leveloffset=+2] include::modules/config-envvar-worker-count.adoc[leveloffset=+2] +include::modules/config-debug-variables.adoc[leveloffset=+2] -include::modules/operator-config-ui.adoc[leveloffset=+1] -include::modules/operator-config-ui-access.adoc[leveloffset=+2] -include::modules/operator-config-ui-change.adoc[leveloffset=+2] -include::modules/operator-config-ui-monitoring.adoc[leveloffset=+2] -include::modules/operator-config-ui-updated.adoc[leveloffset=+2] - -include::modules/operator-components-intro.adoc[leveloffset=+1] -include::modules/operator-components-managed.adoc[leveloffset=+2] -include::modules/operator-components-unmanaged.adoc[leveloffset=+2] 
-include::modules/operator-unmanaged-postgres.adoc[leveloffset=+3] -include::modules/operator-unmanaged-storage-noobaa.adoc[leveloffset=+3] -include::modules/operator-unmanaged-hpa.adoc[leveloffset=+3] -include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+2] -include::modules/operator-helm-oci.adoc[leveloffset=+2] -include::modules/operator-volume-size-overrides.adoc[leveloffset=+2] // Clair @@ -106,20 +114,6 @@ include::modules/config-fields-clair-auth.adoc[leveloffset=+3] include::modules/config-fields-clair-trace.adoc[leveloffset=+3] include::modules/config-fields-clair-metrics.adoc[leveloffset=+3] -//// -include::modules/clair-advanced-configuration-overview.adoc[leveloffset=+2] -include::modules/clair-unmanaged.adoc[leveloffset=+3] -include::modules/unmanaging-clair-database.adoc[leveloffset=+4] -include::modules/clair-crda-configuration.adoc[leveloffset=+3] -include::modules/clair-disconnected.adoc[leveloffset=+3] -include::modules/configuring-clair-disconnected-environment.adoc[leveloffset=+4] -include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+4] -include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+3] -include::modules/clair-add-info.adoc[leveloffset=+3] -//// - -include::modules/proc_container-security-operator-setup.adoc[leveloffset=+1] - ifeval::["{productname}" == "Project Quay"] include::modules/proc_manage-security-scanning.adoc[leveloffset=+1] include::modules/proc_manage-clair-enable.adoc[leveloffset=+1] diff --git a/deploy_quay/docinfo.xml b/deploy_quay/docinfo.xml index 32b84f2f6..482cf8ece 100644 --- a/deploy_quay/docinfo.xml +++ b/deploy_quay/docinfo.xml @@ -1,8 +1,8 @@ {productname} {producty} -Deploy {productname} +Deploying {productname} - Get started with {productname} + Getting started with {productname} Red Hat OpenShift Documentation Team diff --git a/deploy_quay/master.adoc b/deploy_quay/master.adoc index 41465952e..9eb48c65b 100644 --- a/deploy_quay/master.adoc +++ 
b/deploy_quay/master.adoc @@ -2,56 +2,74 @@ include::modules/attributes.adoc[] [id="deploy-quay-single"] -= Deploy {productname} for proof-of-concept (non-production) purposes += Proof of Concept - Deploying {productname} -{productname} is an enterprise-quality registry for building, securing and serving container images. This procedure describes how to deploy {productname} for proof-of-concept (non-production) purposes. +[IMPORTANT] +==== +The following _proof of concept_ deployment method is unsupported for production purposes. This deployment type uses local storage. Local storage is not guaranteed to provide the required read-after-write consistency and data integrity guarantees during parallel access that a storage registry like {productname} requires. Do not use this deployment type for production purposes. Use it for testing purposes only. +==== -include::modules/con_quay_intro.adoc[leveloffset=+1] +{productname} is an enterprise-quality registry for building, securing and serving container images. The documents in this section detail how to deploy {productname} for _proof of concept_, or non-production, purposes. The primary objectives of this document include the following: -[id="poc-getting-started"] -== Getting started with {productname} +* How to deploy {productname} for basic non-production purposes. +* Assess {productname}'s container image management, including how to push, pull, tag, and organize images. +* Explore availability and scalability. +* How to deploy an advanced {productname} proof of concept deployment using SSL/TLS certificates. -The {productname} registry can be deployed for non-production purposes on a single machine, either physical or virtual. 
+Beyond the primary objectives of this document, a proof of concept deployment can be used to test various features offered by {productname}, such as establishing superusers, setting repository quota limitations, enabling Splunk for action log storage, enabling Clair for vulnerability reporting, and more. See the "Next steps" section for a list of some of the features available after you have followed this guide. -include::modules/con_quay_single_prereq.adoc[leveloffset=+2] +This proof of concept deployment procedure can be followed on a single machine, either physical or virtual. + +include::modules/con_quay_single_prereq.adoc[leveloffset=+1] //ifeval::["{productname}" == "Red Hat Quay"] -include::modules/proc_deploy_quay_poc_rhel.adoc[leveloffset=+2] +include::modules/proc_deploy_quay_poc_rhel.adoc[leveloffset=+1] //sendif::[] -include::modules/proc_deploy_quay_poc_db.adoc[leveloffset=+2] +include::modules/preparing-system-deploy-quay.adoc[leveloffset=+1] -include::modules/proc_deploy_quay_poc_redis.adoc[leveloffset=+2] +include::modules/configuring-port-mapping.adoc[leveloffset=+2] -include::modules/proc_deploy_quay_poc_conf.adoc[leveloffset=+2] +include::modules/proc_deploy_quay_poc_db.adoc[leveloffset=+2] -include::modules/proc_deploy_quay_poc_run.adoc[leveloffset=+2] +include::modules/proc_deploy_quay_poc_redis.adoc[leveloffset=+2] -include::modules/proc_deploy_quay_poc_use.adoc[leveloffset=+2] +//include::modules/proc_deploy_quay_poc_conf.adoc[leveloffset=+1] +include::modules/proc_deploy_quay_poc_run.adoc[leveloffset=+1] +include::modules/proc_deploy_quay_poc_use.adoc[leveloffset=+1] -== Advanced {productname} deployment -=== Using SSL to protect connections to {productname} +include::modules/advanced-quay-poc-deployment.adoc[leveloffset=+1] //include::modules/proc_manage-quay-ssl.adoc[leveloffset=+1] -include::modules/ssl-intro.adoc[leveloffset=+3] +include::modules/ssl-intro.adoc[leveloffset=+2] include::modules/ssl-create-certs.adoc[leveloffset=+3] 
-include::modules/ssl-config-ui.adoc[leveloffset=+3] +include::modules/configuring-ssl-tls.adoc[leveloffset=+2] +//include::modules/ssl-config-ui.adoc[leveloffset=+3] include::modules/ssl-config-cli.adoc[leveloffset=+3] +include::modules/testing-ssl-tls-configuration.adoc[leveloffset=+2] include::modules/ssl-testing-cli.adoc[leveloffset=+3] include::modules/ssl-testing-ui.adoc[leveloffset=+3] -include::modules/ssl-trust-ca-podman.adoc[leveloffset=+3] -include::modules/ssl-trust-ca-system.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-podman.adoc[leveloffset=+2] +include::modules/ssl-trust-ca-system.adoc[leveloffset=+2] + +//local ipv6 deployment +include::modules/proc_deploy_quay_local_ipv6.adoc[leveloffset=+1] +include::modules/poc-creating-dual-stack-cn.adoc[leveloffset=+2] +include::modules/deploy-local-quay-ipv6.adoc[leveloffset=+2] + +include::modules/proc_deploy_quay_poc_next.adoc[leveloffset=1] //// include::modules/proc_deploy_quay_poc_dns.adoc[leveloffset=+2] -//// -include::modules/proc_deploy_quay_common_superuser.adoc[leveloffset=+2] === Repository Mirroring +include::modules/proc_deploy_quay_common_superuser.adoc[leveloffset=+2] + + include::modules/mirroring-intro.adoc[leveloffset=+3] include::modules/config-ui-mirroring.adoc[leveloffset=+3] include::modules/mirroring-worker.adoc[leveloffset=+3] @@ -66,14 +84,13 @@ include::modules/clair-cve.adoc[leveloffset=+3] .Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#vulnerability-reporting-clair-quay-overview[Vulnerability reporting with Clair on {productname}] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#vulnerability-reporting-clair-quay-overview[Vulnerability reporting with Clair on {productname}] include::modules/proc_deploy_quay_poc_restart.adoc[leveloffset=+2] 
include::modules/fips-overview.adoc[leveloffset=+2] -include::modules/proc_deploy_quay_poc_next.adoc[leveloffset=1] //include::modules/proc_deploy_quay_guided.adoc[leveloffset=+1] @@ -84,3 +101,4 @@ include::modules/proc_deploy_quay_poc_next.adoc[leveloffset=1] //[discrete] //== Additional resources +//// \ No newline at end of file diff --git a/deploy_quay_ha/master.adoc b/deploy_quay_ha/master.adoc index f5fc33ced..7f1f95c34 100644 --- a/deploy_quay_ha/master.adoc +++ b/deploy_quay_ha/master.adoc @@ -25,6 +25,11 @@ include::modules/proc_deploy_quay_add.adoc[leveloffset=+1] include::modules/con_deploy_quay_start_using.adoc[leveloffset=+1] +//upgrade + +include::modules/upgrading-geo-repl-quay.adoc[leveloffset=+1] + +include::modules/health-check-quay.adoc[leveloffset=+1] [discrete] == Additional resources diff --git a/deploy_quay_on_openshift_op_tng/master.adoc b/deploy_quay_on_openshift_op_tng/master.adoc index 0d1e8fc5b..479892ec9 100644 --- a/deploy_quay_on_openshift_op_tng/master.adoc +++ b/deploy_quay_on_openshift_op_tng/master.adoc @@ -1,7 +1,7 @@ include::modules/attributes.adoc[] -[id='deploy-quay-on-openshift-op-tng'] -= Deploy {productname} on OpenShift with the Quay Operator +[id="deploy-quay-on-openshift-op-tng"] += Deploy {productname} on {ocp} with the {productname} Operator {productname} is an enterprise-quality container registry. Use {productname} to build and store container images, then make them available to deploy across your enterprise. 
@@ -36,16 +36,26 @@ ifeval::["{productname}" == "Red Hat Quay"] include::modules/operator-standalone-object-gateway.adoc[leveloffset=4] endif::[] +//traffic ingress +[id="configuring-traffic-ingress"] +== Configuring traffic ingress +include::modules/operator-preconfig-tls-routes.adoc[leveloffset=+1] + //Database -=== Configuring the database -include::modules/operator-unmanaged-postgres.adoc[leveloffset=+3] -include::modules/config-fields-db.adoc[leveloffset=+3] -include::modules/operator-managed-postgres.adoc[leveloffset=+3] -include::modules/operator-preconfig-tls-routes.adoc[leveloffset=+2] +[id="configuring-the-database-poc"] +== Configuring the database +include::modules/operator-unmanaged-postgres.adoc[leveloffset=+1] +include::modules/config-fields-db.adoc[leveloffset=+1] +include::modules/operator-managed-postgres.adoc[leveloffset=+1] //* The Operator will deploy an OpenShift `Route` as the default entrypoint to the registry. If you prefer a different entrypoint (e.g. `Ingress` or direct `Service` access that configuration will need to be done manually). 
include::modules/operator-components-unmanaged-other.adoc[leveloffset=+2] -include::modules/operator-unmanaged-redis.adoc[leveloffset=+3] -include::modules/config-fields-redis.adoc[leveloffset=+4] +include::modules/operator-unmanaged-redis.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +xref:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-redis[Redis configuration fields] + include::modules/operator-unmanaged-hpa.adoc[leveloffset=+3] include::modules/operator-unmanaged-route.adoc[leveloffset=+3] include::modules/operator-unmanaged-monitoring.adoc[leveloffset=+3] @@ -129,7 +139,7 @@ include::modules/clair-export-bundle-standalone.adoc[leveloffset=+5] include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+5] include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+5] -include::modules/clair-crda-configuration.adoc[leveloffset=+3] +//include::modules/clair-crda-configuration.adoc[leveloffset=+3] include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+3] //// @@ -144,10 +154,12 @@ include::modules/build-limitations.adoc[leveloffset=+2] include::modules/builders-virtual-environment.adoc[leveloffset=+2] include::modules/georepl-intro.adoc[leveloffset=+1] +include::modules/arch-georpl-features.adoc[leveloffset=+2] include::modules/georepl-prereqs.adoc[leveloffset=+2] include::modules/georepl-arch-operator.adoc[leveloffset=+2] include::modules/georepl-deploy-operator.adoc[leveloffset=+3] include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/upgrading-geo-repl-quay-operator.adoc[leveloffset=+2] include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] diff --git a/deploy_red_hat_quay_operator/docinfo.xml b/deploy_red_hat_quay_operator/docinfo.xml new file mode 100644 index 000000000..90dcf4389 
--- /dev/null +++ b/deploy_red_hat_quay_operator/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Deploying the {productname} Operator on {ocp} + + Deploy the {productname} Operator on an {ocp} cluster + + + Red Hat OpenShift Documentation Team + + diff --git a/deploy_red_hat_quay_operator/master.adoc b/deploy_red_hat_quay_operator/master.adoc new file mode 100644 index 000000000..f5bfc8e4c --- /dev/null +++ b/deploy_red_hat_quay_operator/master.adoc @@ -0,0 +1,101 @@ +include::modules/attributes.adoc[] + +[id="deploy-quay-on-openshift-op-tng"] += Deploying the {productname} Operator on {ocp} + +{productname} is an enterprise-quality container registry. Use {productname} to build and store container images, then make them available to deploy across your enterprise. + +The {productname} Operator provides a simple method to deploy and manage {productname} on an OpenShift cluster. + +//differences +include::modules/operator-differences.adoc[leveloffset=+2] + +//concepts +include::modules/operator-concepts.adoc[leveloffset=+1] +include::modules/operator-components-intro.adoc[leveloffset=+2] +include::modules/operator-components-managed.adoc[leveloffset=+2] +include::modules/operator-components-unmanaged.adoc[leveloffset=+2] +include::modules/operator-config-bundle-secret.adoc[leveloffset=+2] +include::modules/operator-prereq.adoc[leveloffset=+2] + +//installing the operator +include::modules/operator-install.adoc[leveloffset=+1] + + +//preconfiguration +include::modules/operator-preconfigure.adoc[leveloffset=+1] +include::modules/config-preconfigure-automation.adoc[leveloffset=+2] +include::modules/operator-preconfig-storage.adoc[leveloffset=+2] +include::modules/operator-unmanaged-storage.adoc[leveloffset=+3] +include::modules/operator-unmanaged-storage-noobaa.adoc[leveloffset=+3] +include::modules/operator-managed-storage.adoc[leveloffset=3] +include::modules/operator-standalone-object-gateway.adoc[leveloffset=4] + +//traffic ingress 
+[id="configuring-traffic-ingress"] +== Configuring traffic ingress +include::modules/operator-preconfig-tls-routes.adoc[leveloffset=+2] + +//configuring resources +include::modules/configuring-resources-managed-components.adoc[leveloffset=+1] + +//Database +[id="configuring-the-database-poc"] +== Configuring the database +include::modules/operator-unmanaged-postgres.adoc[leveloffset=+2] +include::modules/config-fields-db.adoc[leveloffset=+3] +include::modules/operator-managed-postgres.adoc[leveloffset=+3] +//* The Operator will deploy an OpenShift `Route` as the default entrypoint to the registry. If you prefer a different entrypoint (e.g. `Ingress` or direct `Service` access that configuration will need to be done manually). +include::modules/operator-components-unmanaged-other.adoc[leveloffset=+2] +include::modules/operator-unmanaged-redis.adoc[leveloffset=+3] + +[role="_additional-resources"] +.Additional resources +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-redis[Redis configuration fields] + +include::modules/operator-unmanaged-hpa.adoc[leveloffset=+3] +include::modules/operator-unmanaged-route.adoc[leveloffset=+3] +include::modules/operator-unmanaged-monitoring.adoc[leveloffset=+3] +include::modules/operator-unmanaged-mirroring.adoc[leveloffset=+3] + +//operator deployment +include::modules/operator-deploy.adoc[leveloffset=+1] +//cli +include::modules/operator-deploy-cli.adoc[leveloffset=+2] +include::modules/first-user-api.adoc[leveloffset=+3] +include::modules/operator-deploy-view-pods-cli.adoc[leveloffset=+3] +include::modules/operator-deploy-hpa.adoc[leveloffset=+3] + +[role="_additional-resources"] +.Additional resources +For more information on pre-configuring your {productname} deployment, see the section xref:config-preconfigure-automation[Pre-configuring {productname} for automation] + +include::modules/operator-monitor-deploy-cli.adoc[leveloffset=+3] +//ui 
+include::modules/operator-deploy-ui.adoc[leveloffset=+2] +include::modules/operator-first-user-ui.adoc[leveloffset=+3] + +//quayregistry status +include::modules/operator-quayregistry-status.adoc[leveloffset=+1] + +//configuring +include::modules/operator-config-cli.adoc[leveloffset=+1] +include::modules/operator-config-cli-access.adoc[leveloffset=+2] +include::modules/operator-config-cli-download.adoc[leveloffset=+2] + +//SSL/TLS +include::modules/operator-custom-ssl-certs-config-bundle.adoc[leveloffset=+1] +include::modules/ssl-create-certs.adoc[leveloffset=+2] +include::modules/creating-custom-ssl-certs-config-bundle.adoc[leveloffset=+2] + +//Deploying configuration tool +//include::modules/operator-config-ui.adoc[leveloffset=+1] + +//upgrading 38-39 +//removed for 3.10+ +//include::modules/upgrading-postgresql.adoc[leveloffset=+1] + +[role="quay-next-steps"] +.Next steps + +* https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/[{productname} features] diff --git a/deploy_red_hat_quay_operator/modules b/deploy_red_hat_quay_operator/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/deploy_red_hat_quay_operator/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/docs/api-v2-public.json b/docs/api-v2-public.json new file mode 100644 index 000000000..b36e6637d --- /dev/null +++ b/docs/api-v2-public.json @@ -0,0 +1,2369 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "Red Hat Quay API", + "version": "1.0.0", + "description": "This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations." 
+ }, + "servers": [ + { + "url": "{protocol}://{host}", + "description": "Set your own Red Hat Quay registry URL", + "variables": { + "protocol": { + "default": "https", + "enum": [ + "http", + "https" + ] + }, + "host": { + "default": "quay-server.example.com", + "description": "Enter your Red Hat Quay registry hostname." + } + } + } + ], + "security": [ + { + "BearerAuth": [] + } + ], + "tags": [ + { + "name": "Application specific tokens", + "description": "Manage application-specific tokens by using the API" + }, + { + "name": "Builds", + "description": "API endpoints for managing Quay repository builds" + }, + { + "name": "Discovery", + "description": "API discovery information" + }, + { + "name": "Error", + "description": "Obtain error details by using the API" + }, + { + "name": "Global messages", + "description": "Messages API" + }, + { + "name": "Logs", + "description": "Access usage logs for organizations or repositories." + } + ], + "paths": { + "/api/v1/user/apptoken": { + "get": { + "tags": [ + "Application specific tokens" + ], + "summary": "List app-specific tokens", + "description": "Retrieves a list of application-specific tokens for the user.", + "operationId": "listAppTokens", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "expiring", + "in": "query", + "required": false, + "schema": { + "type": "boolean" + }, + "description": "If true, only returns those tokens expiring soon" + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } 
+ } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + }, + "post": { + "tags": [ + "Application specific tokens" + ], + "summary": "Create a new app-specific token", + "description": "Creates a new application-specific token for the user.", + "operationId": "createAppToken", + "security": [ + { + "BearerAuth": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "example": "MyAppToken" + } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "token": { + "type": "string", + "example": "abc123xyz" + } + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/apptoken/{token_uuid}": { + "get": { + "tags": [ + "Application specific tokens" + ], + "summary": "Get details of a specific app token", + "description": "Retrieves details for a specific application token.", + "operationId": "getAppToken", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "token_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": 
{ + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Application specific tokens" + ], + "summary": "Revoke a specific app token", + "description": "Revokes a specific application token for the user.", + "operationId": "revokeAppToken", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "token_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Deleted" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/build/{build_uuid}/status": { + "get": { + "tags": [ + "Builds" + ], + "summary": "Return the status 
for the builds specified by the build UUID", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The full path of the repository (e.g., namespace/name)" + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The UUID of the build" + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/repository/{repository}/build/{build_uuid}/logs": { + "get": { + "tags": [ + "Builds" + ], + "summary": "Return the build logs for the specified build UUID", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/repository/{repository}/build/{build_uuid}": { + "get": { + "tags": [ + "Builds" + ], + "summary": "Returns information about a build", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + 
"description": "Not found" + } + } + }, + "delete": { + "tags": [ + "Builds" + ], + "summary": "Cancel a repository build", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Deleted" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/repository/{repository}/build/": { + "post": { + "tags": [ + "Builds" + ], + "summary": "Request a repository build and push", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "file_id": { + "type": "string" + }, + "archive_url": { + "type": "string" + }, + "subdirectory": { + "type": "string" + }, + "dockerfile_path": { + "type": "string" + }, + "context": { + "type": "string" + }, + "pull_robot": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + }, + "get": { + "tags": [ + "Builds" + ], + "summary": "Get the list of repository builds", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "since", + "in": "query", + "schema": { + "type": "integer" + }, + "description": "Returns all builds since the given Unix timestamp" + }, + { 
+ "name": "limit", + "in": "query", + "schema": { + "type": "integer" + }, + "description": "The maximum number of builds to return" + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/discovery": { + "get": { + "tags": [ + "Discovery" + ], + "summary": "List all available API endpoints", + "description": "Returns a list of all API endpoints available in the Swagger API format.", + "operationId": "getDiscovery", + "parameters": [ + { + "name": "internal", + "in": "query", + "description": "Whether to include internal APIs.", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/error/{error_type}": { + "get": { + "tags": [ + "Error" + ], + "summary": "Get a detailed description of the error", + "description": "Retrieves a detailed description of the specified error type.", + "operationId": "getErrorDescription", + "security": [ + { + "BearerAuth": [] + } + ], + 
"parameters": [ + { + "name": "error_type", + "in": "path", + "description": "The error code identifying the type of error.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiErrorDescription" + }, + "example": { + "error": "404", + "message": "The requested resource was not found." + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 400, + "detail": "Invalid request format." + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 401, + "detail": "Authentication token is missing or invalid." + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 403, + "detail": "You do not have permission to access this resource." + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 404, + "detail": "Error type not found." 
+ } + } + } + } + } + } + }, + "/api/v1/messages": { + "post": { + "tags": [ + "Global messages" + ], + "summary": "Create a global message", + "description": "Creates a new global message with a specified content type and severity level.", + "operationId": "createGlobalMessage", + "security": [ + { + "oauth2_implicit": [ + "super:user" + ] + }, + { + "BearerAuth": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The message text." + }, + "media_type": { + "type": "string", + "enum": [ + "text/plain", + "text/html", + "application/json" + ], + "description": "The media type of the message." + }, + "severity": { + "type": "string", + "enum": [ + "info", + "warning", + "error" + ], + "description": "The severity level of the message." + } + }, + "required": [ + "content", + "media_type", + "severity" + ] + } + } + }, + "example": { + "message": { + "content": "Hi", + "media_type": "text/plain", + "severity": "info" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 400, + "detail": "Invalid message format." + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 401, + "detail": "Authentication required." + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 403, + "detail": "You do not have permission to create messages." 
+ } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 404, + "detail": "Endpoint not found." + } + } + } + } + } + }, + "get": { + "tags": [ + "Global messages" + ], + "summary": "Get global messages", + "description": "Returns all global messages visible to super users.", + "operationId": "getGlobalMessages", + "security": [ + { + "oauth2_implicit": [ + "super:user" + ] + }, + { + "BearerAuth": [] + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "messages": { + "type": "array", + "description": "A list of global messages.", + "items": { + "type": "object", + "properties": { + "uuid": { + "type": "string", + "format": "uuid", + "description": "Unique identifier of the message." + }, + "content": { + "type": "string", + "description": "The message content." + }, + "severity": { + "type": "string", + "enum": [ + "info", + "warning", + "error" + ], + "description": "The severity level of the message." + }, + "media_type": { + "type": "string", + "description": "The media type of the message (e.g., text/plain)." 
+ } + } + } + } + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/message/{uuid}": { + "delete": { + "tags": [ + "Global messages" + ], + "summary": "Delete a message", + "description": "Deletes a global message by its UUID.", + "operationId": "deleteGlobalMessage", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "uuid", + "in": "path", + "required": true, + "description": "The unique identifier of the message to delete.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Deleted" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/aggregatelogs": { + "get": { + "summary": "Returns the aggregated logs for the current user.", + "description": "Retrieves aggregated logs for the 
specified user within a given date range.", + "operationId": "getAggregateUserLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/exportlogs": { + "post": { + "summary": "Exports aggregated logs for the current user.", + "description": "Initiates an export of user logs for a given date range and sends the exported logs via a callback URL or email.\n", + "operationId": "exportUserLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. 
Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "requestBody": { + "description": "Configuration for an export logs operation (empty JSON object required for request).", + "required": false, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "callback_url": { + "type": "string", + "description": "The callback URL to invoke with a link to the exported logs." + }, + "callback_email": { + "type": "string", + "description": "The e-mail address at which to e-mail a link to the exported logs." + } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "export_id": { + "type": "string", + "description": "The ID of the exported log file.", + "example": "19689987-b37c-4319-a18b-86b92407af74" + } + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/logs": { + "get": { + "summary": "List the logs for the current user.", + "description": "Retrieves a list of logs for the current user with optional filters for performer, start time, end 
time, and pagination.\n", + "operationId": "listUserLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "user:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "next_page", + "in": "query", + "description": "The page token for the next page of logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "message": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + } + } + } + } + }, + "example": { + "logs": [ + { + "log_id": "12345", + "message": "User performed an action.", + "timestamp": "2024-03-25T12:00:00Z" + } + ] + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": 
"Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/organization/{orgname}/aggregatelogs": { + "get": { + "summary": "Gets the aggregated logs for the specified organization.", + "description": "Retrieves the aggregated logs for a specified organization with optional filters for performer, start time, and end time.\n", + "operationId": "getAggregateOrgLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "org:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "orgname", + "in": "path", + "description": "The name of the organization.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. 
Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "message": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + } + } + } + } + }, + "example": { + "logs": [ + { + "log_id": "98765", + "message": "Organization-level action performed.", + "timestamp": "2024-03-25T12:00:00Z" + } + ] + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/organization/{orgname}/exportlogs": { + "post": { + "summary": "Exports the logs for the specified organization.", + "description": "Exports the logs for a specified organization within a given time range.\n", + "operationId": "exportOrgLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "org:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "orgname", + "in": "path", + "description": "The name of the organization.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. 
Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "requestBody": { + "description": "Optional payload (empty JSON object required for request).", + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "example": {} + } + } + } + }, + "responses": { + "200": { + "description": "Export request received successfully.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "export_id": { + "type": "string" + } + }, + "example": { + "export_id": "d21f74c6-7e6c-4d2a-bc34-8c926789ab56" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/organization/{orgname}/logs": { + "get": { + "summary": "List logs for the specified organization.", + "description": "Retrieves the logs for a specified organization within a given time range.\n", + "operationId": "listOrgLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "org:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "orgname", + "in": "path", + "description": "The name of the organization.", + "required": true, + "schema": { + 
"type": "string" + } + }, + { + "name": "next_page", + "in": "query", + "description": "The page token for the next page.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + }, + "performer": { + "type": "string" + }, + "action": { + "type": "string" + } + } + }, + "example": [ + { + "log_id": "1a2b3c4d", + "timestamp": "2025-03-25T10:15:30Z", + "performer": "johndoe", + "action": "repo.created" + } + ] + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/aggregatelogs": { + "get": { + 
"summary": "Get aggregated logs for a repository", + "description": "Returns the aggregated logs for the specified repository within a given time range.\n", + "operationId": "getAggregateRepoLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "repo:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "repository", + "in": "path", + "description": "The full path of the repository (e.g., namespace/name).", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "repository": { + "type": "string" + }, + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + }, + "performer": { + "type": "string" + }, + "action": { + "type": "string" + } + } + } + } + }, + "example": { + "repository": "namespace/myrepo", + "logs": [ + { + "log_id": "1a2b3c4d", + "timestamp": "2025-03-25T10:15:30Z", + "performer": "johndoe", + "action": "tag.deleted" + } + ] + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/exportlogs": { + "post": { + "summary": "Export repository logs", + "description": "Queues an export of the logs for the specified repository. The export can be delivered via a callback URL or an email notification.\n", + "operationId": "exportRepoLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "repo:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "repository", + "in": "path", + "description": "The full path of the repository (e.g., namespace/name).", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "requestBody": { + "description": "Configuration for an export logs operation (empty JSON object required for request).", + "required": false, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "callback_url": { + "type": "string", + "format": "uri", + "description": "The callback URL to invoke with a link to the exported logs." + }, + "callback_email": { + "type": "string", + "format": "email", + "description": "The email address at which to send a link to the exported logs." 
+ } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "export_id": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "queued", + "processing", + "completed" + ] + } + }, + "example": { + "export_id": "12345-abcdef", + "status": "queued" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/logs": { + "get": { + "summary": "List repository logs", + "description": "Retrieves a list of logs for the specified repository. Supports pagination and filtering by time range.\n", + "operationId": "listRepoLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "repo:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "repository", + "in": "path", + "description": "The full path of the repository (e.g., namespace/name).", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. 
Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "next_page", + "in": "query", + "description": "The page token for retrieving the next set of logs.", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "timestamp": { + "type": "string", + "format": "date-time", + "description": "Timestamp of the log entry." + }, + "action": { + "type": "string", + "description": "Action performed." + }, + "performer": { + "type": "string", + "description": "User who performed the action." + }, + "details": { + "type": "object", + "description": "Additional details about the log entry." + } + } + } + }, + "next_page": { + "type": "string", + "description": "Token for the next page of results, if available." 
+ } + }, + "example": { + "logs": [ + { + "timestamp": "2024-03-25T12:34:56Z", + "action": "push", + "performer": "user123", + "details": { + "tag": "latest" + } + } + ], + "next_page": "token123" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + } + }, + "components": { + "securitySchemes": { + "BearerAuth": { + "type": "http", + "scheme": "bearer" + } + }, + "schemas": { + "ApiError": { + "type": "object", + "properties": { + "status": { + "type": "integer", + "description": "HTTP status code of the error." + }, + "detail": { + "type": "string", + "description": "A short message describing the error." + } + } + }, + "ApiErrorDescription": { + "type": "object", + "properties": { + "error": { + "type": "string", + "description": "The error code." + }, + "message": { + "type": "string", + "description": "A detailed description of the error." + } + } + } + } + } +} \ No newline at end of file diff --git a/docs/api-v2.md b/docs/api-v2.md new file mode 100644 index 000000000..e64324ce1 --- /dev/null +++ b/docs/api-v2.md @@ -0,0 +1,3 @@ +# API V2 + + diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..5327c2e4a --- /dev/null +++ b/docs/index.md @@ -0,0 +1,17 @@ +# Red Hat Quay + +For full documentation visit [mkdocs.org](https://www.mkdocs.org). + +## Commands + +* `mkdocs new [dir-name]` - Create a new project. 
+* `mkdocs serve` - Start the live-reloading docs server. +* `mkdocs build` - Build the documentation site. +* `mkdocs -h` - Print help message and exit. + +## Project layout + + mkdocs.yml # The configuration file. + docs/ + index.md # The documentation homepage. + ... # Other markdown pages, images and other files. diff --git a/images/application-token.png b/images/application-token.png new file mode 100644 index 000000000..5acb1c2ce Binary files /dev/null and b/images/application-token.png differ diff --git a/images/august-receipt.png b/images/august-receipt.png new file mode 100644 index 000000000..b1f1ead53 Binary files /dev/null and b/images/august-receipt.png differ diff --git a/images/authentication-ldap-admin-dn.png b/images/authentication-ldap-admin-dn.png index 09ff8bb75..3c7ea2eed 100644 Binary files a/images/authentication-ldap-admin-dn.png and b/images/authentication-ldap-admin-dn.png differ diff --git a/images/authentication-ldap-basedn.png b/images/authentication-ldap-basedn.png index b0a02136b..9749ce9d7 100644 Binary files a/images/authentication-ldap-basedn.png and b/images/authentication-ldap-basedn.png differ diff --git a/images/auto-prune-policies-page.png b/images/auto-prune-policies-page.png new file mode 100644 index 000000000..05ddb54c2 Binary files /dev/null and b/images/auto-prune-policies-page.png differ diff --git a/images/build-history.png b/images/build-history.png new file mode 100644 index 000000000..8f9605f38 Binary files /dev/null and b/images/build-history.png differ diff --git a/images/build-trigger-example.png b/images/build-trigger-example.png new file mode 100644 index 000000000..bb3b5fad6 Binary files /dev/null and b/images/build-trigger-example.png differ diff --git a/images/custom-tagging.png b/images/custom-tagging.png new file mode 100644 index 000000000..d40092b7d Binary files /dev/null and b/images/custom-tagging.png differ diff --git a/images/e2e-demo-httpd-example.png b/images/e2e-demo-httpd-example.png new file 
mode 100644 index 000000000..d32aa5b90 Binary files /dev/null and b/images/e2e-demo-httpd-example.png differ diff --git a/images/export-usage-logs.png b/images/export-usage-logs.png index 79e6151fc..c5e47b1fa 100644 Binary files a/images/export-usage-logs.png and b/images/export-usage-logs.png differ diff --git a/images/image-fetch.png b/images/image-fetch.png index b306047b9..568de1dec 100644 Binary files a/images/image-fetch.png and b/images/image-fetch.png differ diff --git a/images/logsv2-ui.png b/images/logsv2-ui.png new file mode 100644 index 000000000..38c8b4876 Binary files /dev/null and b/images/logsv2-ui.png differ diff --git a/images/mail-attribute-ldap.png b/images/mail-attribute-ldap.png new file mode 100644 index 000000000..9a487c0bd Binary files /dev/null and b/images/mail-attribute-ldap.png differ diff --git a/images/manifest-example.png b/images/manifest-example.png new file mode 100644 index 000000000..6c7313067 Binary files /dev/null and b/images/manifest-example.png differ diff --git a/images/metadata-request.png b/images/metadata-request.png new file mode 100644 index 000000000..ccc13a985 Binary files /dev/null and b/images/metadata-request.png differ diff --git a/images/modelcard.png b/images/modelcard.png new file mode 100644 index 000000000..717e216e0 Binary files /dev/null and b/images/modelcard.png differ diff --git a/images/permanently-delete-image-tag.png b/images/permanently-delete-image-tag.png new file mode 100644 index 000000000..ebe887964 Binary files /dev/null and b/images/permanently-delete-image-tag.png differ diff --git a/images/quay-hybrid-cloud-landing-page.png b/images/quay-hybrid-cloud-landing-page.png new file mode 100644 index 000000000..24d2ed49e Binary files /dev/null and b/images/quay-hybrid-cloud-landing-page.png differ diff --git a/images/quayio-footer.png b/images/quayio-footer.png new file mode 100644 index 000000000..db71d50fd Binary files /dev/null and b/images/quayio-footer.png differ diff --git 
a/images/quayio-header.png b/images/quayio-header.png new file mode 100644 index 000000000..540519188 Binary files /dev/null and b/images/quayio-header.png differ diff --git a/images/quayio-repo-landing-page.png b/images/quayio-repo-landing-page.png new file mode 100644 index 000000000..532f4cdfe Binary files /dev/null and b/images/quayio-repo-landing-page.png differ diff --git a/images/register-app.png b/images/register-app.png index b3963d7b0..aff4c5aaa 100644 Binary files a/images/register-app.png and b/images/register-app.png differ diff --git a/images/scheduled-maintenance-banner.png b/images/scheduled-maintenance-banner.png new file mode 100644 index 000000000..160142fdc Binary files /dev/null and b/images/scheduled-maintenance-banner.png differ diff --git a/images/set-repository-permissions-robot-account.png b/images/set-repository-permissions-robot-account.png new file mode 100644 index 000000000..fa97eeb0e Binary files /dev/null and b/images/set-repository-permissions-robot-account.png differ diff --git a/images/set-team-role.png b/images/set-team-role.png index c27ee8d37..334138383 100644 Binary files a/images/set-team-role.png and b/images/set-team-role.png differ diff --git a/images/splunk-log-metadata.png b/images/splunk-log-metadata.png new file mode 100644 index 000000000..813e2dd17 Binary files /dev/null and b/images/splunk-log-metadata.png differ diff --git a/images/tag-expiration-v2-ui.png b/images/tag-expiration-v2-ui.png new file mode 100644 index 000000000..f15c4b1f3 Binary files /dev/null and b/images/tag-expiration-v2-ui.png differ diff --git a/images/total-quota-consumed.png b/images/total-quota-consumed.png new file mode 100644 index 000000000..e2b7b83af Binary files /dev/null and b/images/total-quota-consumed.png differ diff --git a/images/total-registry-size.png b/images/total-registry-size.png new file mode 100644 index 000000000..689f87c9b Binary files /dev/null and b/images/total-registry-size.png differ diff --git 
a/images/uid-attribute-ldap.png b/images/uid-attribute-ldap.png new file mode 100644 index 000000000..42d78acfc Binary files /dev/null and b/images/uid-attribute-ldap.png differ diff --git a/images/user-relative-dn.png b/images/user-relative-dn.png new file mode 100644 index 000000000..274208f5f Binary files /dev/null and b/images/user-relative-dn.png differ diff --git a/images/view-credentials.png b/images/view-credentials.png index 52296ffcb..cce2a663e 100644 Binary files a/images/view-credentials.png and b/images/view-credentials.png differ diff --git a/manage_quay/master.adoc b/manage_quay/master.adoc index 0d3e19c5b..3012076b7 100644 --- a/manage_quay/master.adoc +++ b/manage_quay/master.adoc @@ -39,6 +39,7 @@ include::modules/proc_manage-release-notifications.adoc[leveloffset=+1] == Using SSL to protect connections to {productname} //include::modules/proc_manage-quay-ssl.adoc[leveloffset=+1] include::modules/ssl-intro.adoc[leveloffset=+2] +//// include::modules/ssl-create-certs.adoc[leveloffset=+2] include::modules/ssl-config-cli.adoc[leveloffset=+2] include::modules/ssl-config-ui.adoc[leveloffset=+2] @@ -49,8 +50,16 @@ include::modules/ssl-trust-ca-system.adoc[leveloffset=+2] include::modules/config-custom-ssl-certs-manual.adoc[leveloffset=+1] include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+2] +//// include::modules/proc_manage-log-storage.adoc[leveloffset=+1] +include::modules/proc_manage-log-storage-elasticsearch.adoc[leveloffset=+2] +include::modules/proc_manage-log-storage-splunk.adoc[leveloffset=+2] +include::modules/proc_installing-creating-username-splunk.adoc[leveloffset=+3] +include::modules/proc_generating-splunk-token.adoc[leveloffset=+3] +include::modules/proc_splunk-config.adoc[leveloffset=+3] +include::modules/proc_splunk-action-log.adoc[leveloffset=+3] +include::modules/understanding-action-logs.adoc[leveloffset=+2] :context: security-scanning @@ -62,14 +71,6 @@ include::modules/clair-testing.adoc[leveloffset=+2] 
:context: manage_quay -include::modules/conc_quay-bridge-operator.adoc[leveloffset=+1] -include::modules/proc_setting-up-quay-for-qbo.adoc[leveloffset=+2] -include::modules/proc_installing-qbo-on-ocp.adoc[leveloffset=+2] -include::modules/proc_creating-ocp-secret-for-oauth-token.adoc[leveloffset=+2] -include::modules/proc_creating-quay-integration-cr.adoc[leveloffset=+2] -include::modules/ref_quay-integration-config-fields.adoc[leveloffset=+2] - - [[repo-mirroring-in-red-hat-quay]] == Repository mirroring @@ -94,6 +95,20 @@ include::modules/proc_manage-ipv6-dual-stack.adoc[leveloffset=+1] include::modules/proc_manage-ldap-setup.adoc[leveloffset=+1] +//oidc and SSO +include::modules/configuring-oidc-authentication.adoc[leveloffset=+1] +include::modules/configuring-red-hat-sso.adoc[leveloffset=+2] +include::modules/enabling-team-sync-oidc.adoc[leveloffset=+2] + +//keyless auth + +include::modules/keyless-authentication-robot-accounts.adoc[leveloffset=+1] + +//aws sts +include::modules/configuring-aws-sts-quay.adoc[leveloffset=+1] +include::modules/configuring-quay-standalone-aws-sts.adoc[leveloffset=+2] + + include::modules/proc_manage-quay-prometheus.adoc[leveloffset=+1] include::modules/metrics-intro.adoc[leveloffset=+2] include::modules/metrics-general-registry-stats.adoc[leveloffset=+3] @@ -106,38 +121,56 @@ include::modules/metrics-authentication.adoc[leveloffset=+3] //include::modules/proc_manage-quay-geo-replication.adoc[leveloffset=+1] include::modules/quota-management-and-enforcement.adoc[leveloffset=+1] -include::modules/config-fields-quota.adoc[leveloffset=+2] -include::modules/quota-management-arch.adoc[leveloffset=+2] +//include::modules/quota-management-arch.adoc[leveloffset=+2] +include::modules/quota-management-limitations.adoc[leveloffset=+2] +include::modules/red-hat-quay-quota-management-configure-39.adoc[leveloffset=+2] + +include::modules/quota-management-testing-39.adoc[leveloffset=+2] +include::modules/setting-default-quota.adoc[leveloffset=+2] 
include::modules/quota-establishment-ui.adoc[leveloffset=+2] include::modules/quota-establishment-api.adoc[leveloffset=+2] -include::modules/quota-management-limitations.adoc[leveloffset=+2] +include::modules/quota-management-query-39.adoc[leveloffset=+2] +include::modules/deleting-tag-permanently.adoc[leveloffset=+2] +//namespace auto-pruning + +include::modules/red-hat-quay-namespace-auto-pruning-overview.adoc[leveloffset=+1] +include::modules/managing-namespace-auto-pruning-policies.adoc[leveloffset=+2] include::modules/georepl-intro.adoc[leveloffset=+1] +include::modules/arch-georpl-features.adoc[leveloffset=+2] include::modules/georepl-prereqs.adoc[leveloffset=+2] -include::modules/georepl-arch-standalone.adoc[leveloffset=+2] +//include::modules/georepl-arch-standalone.adoc[leveloffset=+2] include::modules/config-ui-storage-georepl.adoc[leveloffset=+3] include::modules/georepl-deploy-standalone.adoc[leveloffset=+3] -include::modules/georepl-arch-operator.adoc[leveloffset=+2] +include::modules/standalone-georepl-site-removal.adoc[leveloffset=+3] +//include::modules/georepl-arch-operator.adoc[leveloffset=+2] include::modules/georepl-deploy-operator.adoc[leveloffset=+3] -include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/operator-georepl-site-removal.adoc[leveloffset=+3] +include::modules/georepl-mixed-storage.adoc[leveloffset=+2] +//// include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] include::modules/restoring-red-hat-quay.adoc[leveloffset=+2] - - -include::modules/standalone-to-operator-backup-restore.adoc[leveloffset=+1] +//// include::modules/standalone-deployment-backup-restore.adoc[leveloffset=+1] +include::modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc[leveloffset=+2] include::modules/backing-up-red-hat-quay-standalone.adoc[leveloffset=+2] include::modules/restoring-red-hat-quay-standalone.adoc[leveloffset=+2] 
+include::modules/standalone-to-operator-backup-restore.adoc[leveloffset=+1] + +//include::modules/configuring-oci-media-types.adoc[leveloffset=+1] + include::modules/garbage-collection.adoc[leveloffset=+1] -include::modules/proc_manage-quay-troubleshooting.adoc[leveloffset=+1] +include::modules/using-v2-ui.adoc[leveloffset=+1] + +include::modules/health-check-quay.adoc[leveloffset=+1] + -include::modules/con_schema.adoc[leveloffset=+1] +include::modules/branding-quay-deployment.adoc[leveloffset=+1] -[discrete] -== Additional resources +include::modules/con_schema.adoc[leveloffset=+1] \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..606d65758 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,59 @@ +site_name: Red Hat Quay API endpoint +site_url: https://github.com/quay +theme: + name: material + features: + - navigation.tabs + - navigation.sections + - toc.integrate + - navigation.top + - search.suggest + - search.highlight + - content.tabs.link + - content.code.annotation + - content.code.copy + language: en + palette: + - scheme: default + toggle: + icon: material/toggle-switch-off-outline + name: Switch to dark mode + primary: teal + accent: purple + - scheme: slate + toggle: + icon: material/toggle-switch + name: Switch to light mode + primary: teal + accent: lime + +plugins: + - social + - search + - swagger-ui-tag + +extra: + social: + - icon: fontawesome/brands/github-alt + link: https://github.com/quay + +markdown_extensions: + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.snippets + - admonition + - pymdownx.arithmatex: + generic: true + - footnotes + - pymdownx.details + - pymdownx.superfences + - pymdownx.mark + - attr_list + +# Docs Navigation +nav: +- API V2: api-v2.md + +copyright: | + © 2023 Red Hat Quay Development Team \ No newline at end of file diff --git a/modules/about-clair.adoc b/modules/about-clair.adoc index 07666ae6d..de31b0e7e 100644 --- 
a/modules/about-clair.adoc +++ b/modules/about-clair.adoc @@ -1,34 +1,162 @@ // Module included in the following assemblies: // // clair/master.adoc +// quay.io +// security :_content-type: CONCEPT [id="about-clair"] = About Clair -The content in this section highlights Clair releases, official Clair containers, and information about CVSS enrichment data. +Clair uses Common Vulnerability Scoring System (CVSS) data from the National Vulnerability Database (NVD) to enrich vulnerability data, which is a United States government repository of security-related information, including known vulnerabilities and security issues in various software components and systems. Using scores from the NVD provides Clair the following benefits: +* **Data synchronization**. Clair can periodically synchronize its vulnerability database with the NVD. This ensures that it has the latest vulnerability data. +* **Matching and enrichment**. Clair compares the metadata and identifiers of vulnerabilities it discovers in container images with the data from the NVD. This process involves matching the unique identifiers, such as Common Vulnerabilities and Exposures (CVE) IDs, to the entries in the NVD. When a match is found, Clair can enrich its vulnerability information with additional details from NVD, such as severity scores, descriptions, and references. +* **Severity Scores**. The NVD assigns severity scores to vulnerabilities, such as the Common Vulnerability Scoring System (CVSS) score, to indicate the potential impact and risk associated with each vulnerability. By incorporating NVD's severity scores, Clair can provide more context on the seriousness of the vulnerabilities it detects. + +If Clair finds vulnerabilities from NVD, a detailed and standardized assessment of the severity and potential impact of vulnerabilities detected within container images is reported to users on the UI. CVSS enrichment data provides Clair the following benefits: + +* *Vulnerability prioritization*. 
By utilizing CVSS scores, users can prioritize vulnerabilities based on their severity, helping them address the most critical issues first. +* *Assess Risk*. CVSS scores can help Clair users understand the potential risk a vulnerability poses to their containerized applications. +* *Communicate Severity*. CVSS scores provide Clair users a standardized way to communicate the severity of vulnerabilities across teams and organizations. +* *Inform Remediation Strategies*. CVSS enrichment data can guide {quayio} users in developing appropriate remediation strategies. +* *Compliance and Reporting*. Integrating CVSS data into reports generated by Clair can help organizations demonstrate their commitment to addressing security vulnerabilities and complying with industry standards and regulations. + +ifeval::["{context}" == "clair"] [id="clair-releases"] == Clair releases New versions of Clair are regularly released. The source code needed to build Clair is packaged as an archive and attached to each release. Clair releases can be found at link:https://github.com/quay/clair/releases[Clair releases]. - Release artifacts also include the `clairctl` command line interface tool, which obtains updater data from the internet by using an open host. +[discrete] +[id="clair-releases-48"] +=== Clair 4.8 + +Clair 4.8 was released on 24-10-28. The following changes have been made: + +* Clair on {productname} now requires that you update the Clair PostgreSQL database from version 13 to version 15. For more information about this procedure, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#upgrading-clair-postgresql-database[Upgrading the Clair PostgreSQL database]. + +* This release deprecates the updaters that rely on the Red Hat OVAL v2 security data in favor of the Red Hat VEX data. This change includes a database migration to delete all the vulnerabilities that originated from the OVAL v2 feeds. 
Because of this, there could be intermittent downtime in production environments before the VEX updaters complete for the first time when no vulnerabilities exist. + +[id="clair-suse-enterprise-known-issue"] +=== Clair 4.8.0 known issues + +* When pushing Suse Enterprise Linux Images with *HIGH* image vulnerabilities, Clair 4.8.0 does not report these vulnerabilities. This is a known issue and will be fixed in a future version of {productname}. + +[discrete] +[id="clair-releases-474"] +=== Clair 4.7.4 + +Clair 4.7.4 was released on 2024-05-01. The following changes have been made: + +* The default layer download location has changed. For more information, see link:https://github.com/quay/clair/blob/release-4.7/Documentation/howto/deployment.md#disk-usage-considerations[Disk usage considerations]. + +[discrete] +[id="clair-releases-473"] +=== Clair 4.7.3 + +Clair 4.7.3 was released on 2024-02-26. The following changes have been made: + +* The minimum TLS version for Clair is now 1.2. Previously, servers allowed for 1.1 connections. + +[discrete] +[id="clair-releases-472"] +=== Clair 4.7.2 + +Clair 4.7.2 was released on 2023-10-09. The following changes have been made: + +* CRDA support has been removed. + +[discrete] +[id="clair-releases-471"] +=== Clair 4.7.1 + +Clair 4.7.1 was released as part of {productname} 3.9.1. The following changes have been made: + +* With this release, you can view unpatched vulnerabilities from {rhel} sources. If you want to view unpatched vulnerabilities, you can set the `ignore_unpatched` parameter to `false`. For example: ++ +[source,terminal] +---- +updaters: + config: + rhel: + ignore_unpatched: false +---- ++ +To disable this feature, you can set `ignore_unpatched` to `true`. + +[discrete] +[id="clair-releases-47"] +=== Clair 4.7 + +Clair 4.7 was released as part of {productname} 3.9, and includes support for the following features: + +* Native support for indexing Golang modules and RubyGems in container images. 
+* Change to link:https://osv.dev/[OSV.dev] as the vulnerability database source for any programming language package managers. +** This includes popular sources like GitHub Security Advisories or PyPA. +** This allows offline capability. +* Use of pyup.io for Python and CRDA for Java is suspended. +* Clair now supports Java, Golang, Python, and Ruby dependencies. +endif::[] + +[id="vuln-database-clair"] +== Clair vulnerability databases + +Clair uses the following vulnerability databases to report issues in your images: + +* Ubuntu Oval database +* Debian Security Tracker +* {rhel} Oval database +* SUSE Oval database +* Oracle Oval database +* Alpine SecDB database +* VMware Photon OS database +* Amazon Web Services (AWS) UpdateInfo +* link:https://osv.dev/[Open Source Vulnerability (OSV) Database] + [id="clair-supported-languages"] -== Clair supported languages +== Clair supported dependencies -Clair supports the following languages: +Clair supports identifying and managing the following dependencies: + +* Java +* Golang * Python -* Java (CRDA must be enabled) +* Ruby + +This means that it can analyze and report on the third-party libraries and packages that a project in these languages relies on to work correctly. + +When an image that contains packages from a language unsupported by Clair is pushed to your repository, a vulnerability scan cannot be performed on those packages. Users do not receive an analysis or security report for unsupported dependencies or packages. As a result, the following consequences should be considered: + +* *Security risks*. Dependencies or packages that are not scanned for vulnerabilities might pose security risks to your organization. +* *Compliance issues*. If your organization has specific security or compliance requirements, unscanned, or partially scanned, container images might result in non-compliance with certain regulations. 
++ +[NOTE] +==== +Scanned images are indexed, and a vulnerability report is created, but it might omit data from certain unsupported languages. For example, if your container image contains a Lua application, feedback from Clair is not provided because Clair does not detect it. It can detect other languages used in the container image, and shows detected CVEs for those languages. As a result, Clair images are _fully scanned_ based on what is supported by Clair. +==== +ifeval::["{context}" == "clair"] [id="clair-containers"] == Clair containers -Official downstream Clair containers bundled with {productname} can be found on the link:registry.redhat.io[Red Hat Ecosystem Catalog]. +Official downstream Clair containers bundled with {productname} can be found on the link:https://catalog.redhat.com[Red Hat Ecosystem Catalog]. -Official upstream containers are packaged and released as a container at link:quay.io/projectquay/clair[Quay.io/projectquay/clair]. The latest tag tracks the Git development branch. Version tags are built from the corresponding release. +Official upstream containers are packaged and released under the Clair project on link:https://quay.io/repository/projectquay/clair[Quay.io]. The latest tag tracks the Git development branch. Version tags are built from the corresponding release. +endif::[] + +//// +==== OSV mapping + +[cols="2,2",options="header"] +|=== +| Severity | Clair Severity +| | + +|=== +//// //// diff --git a/modules/accessing-swagger-ui.adoc b/modules/accessing-swagger-ui.adoc new file mode 100644 index 000000000..8d2437679 --- /dev/null +++ b/modules/accessing-swagger-ui.adoc @@ -0,0 +1,38 @@ +:_content-type: REFERENCE +[id="accessing-swagger-ui"] += Accessing {productname} Swagger UI + +{productname} administrators and users can interact with the API by using the Swagger UI, an interactive web interface that compiles executable commands. 
The Swagger UI can be launched as a container that points to your {productname} instance's API discovery endpoint (`/api/v1/discovery`). After deploying the container, you can access the Swagger UI, which loads the OpenAPI specification for {productname} from the specified URL. {productname} administrators and users can explore the available endpoints and their structure. + +Use the following procedure to access the {productname} Swagger UI. + +.Procedure + +. Enter the following command to deploy the Swagger UI container, pointing the URL to your {productname}'s API discovery endpoint. For example: ++ +[source,terminal] +---- +$ podman run -p 8080:8080 -e SWAGGER_JSON_URL= docker.swagger.io/swaggerapi/swagger-ui +---- ++ +.Example output ++ +[source,terminal] +---- +--- +/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh +20-envsubst-on-templates.sh: Running envsubst on /etc/nginx/templates/default.conf.template to /etc/nginx/conf.d/default.conf +/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh +/docker-entrypoint.sh: Launching /docker-entrypoint.d/40-swagger-ui.sh +/docker-entrypoint.sh: Configuration complete; ready for start up +--- +---- + +. Navigate to the `localhost` URL. In this example, it is *http://localhost:8080/*. + +. Use the Swagger UI to test various API endpoints. For example, to create a new token for a user, you can click the *POST /api/v1/user/apptoken* endpoint -> *Try it out* -> *Execute* to generate an example `curl` command. ++ +[NOTE] +==== +Currently, server responses cannot be generated. This is because the Swagger UI is not set up to accept bearer tokens. As a result, the following error is returned for each command: `{"error": "CSRF token was invalid or missing."}`. 
As a workaround, you can copy this command into your terminal and manually add your bearer token, for example, `-H 'Authorization: Bearer '` +==== \ No newline at end of file diff --git a/modules/add-users-to-team.adoc b/modules/add-users-to-team.adoc new file mode 100644 index 000000000..d08378ce7 --- /dev/null +++ b/modules/add-users-to-team.adoc @@ -0,0 +1,43 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="adding-users-to-team"] += Adding users to a team by using the UI + +With administrative privileges to an Organization, you can add users and robot accounts to a team. When you add a user, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +sends an email to that user. The user remains pending until they accept the invitation. + +Use the following procedure to add users or robot accounts to a team. + +.Procedure + +. On the {productname} landing page, click the name of your Organization. + +. In the navigation pane, click *Teams and Membership*. + +. Select the menu kebab of the team that you want to add users or robot accounts to. Then, click *Manage team members*. + +. Click *Add new member*. + +. In the textbox, enter information for one of the following: ++ +* A username from an account on the registry. +* The email address for a user account on the registry. +* The name of a robot account. The name must be in the form of +. ++ +[NOTE] +==== +Robot Accounts are immediately added to the team. For user accounts, an invitation to join is mailed to the user. Until the user accepts that invitation, the user remains in the *INVITED TO JOIN* state. After the user accepts the email invitation to join the team, they move from the *INVITED TO JOIN* list to the *MEMBERS* list for the Organization. +==== + +. Click *Add member*. 
\ No newline at end of file diff --git a/modules/adding-a-new-tag-to-image-api.adoc b/modules/adding-a-new-tag-to-image-api.adoc new file mode 100644 index 000000000..cfeeae0be --- /dev/null +++ b/modules/adding-a-new-tag-to-image-api.adoc @@ -0,0 +1,69 @@ +:_content-type: CONCEPT +[id="adding-tags-api"] += Adding a new tag to an image by using the API + +You can add a new tag, or restore an old one, to an image by using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. You can change which image a tag points to or create a new tag by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changetag[`PUT /api/v1/repository/{repository}/tag/{tag}`] command: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": "" + }' \ + https:///api/v1/repository///tag/ +---- ++ +.Example output ++ +[source,terminal] +---- +"Updated" +---- + +. You can restore a repository tag to its previous image by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#restoretag[`POST /api/v1/repository/{repository}/tag/{tag}/restore`] command. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": + }' \ + quay-server.example.com/api/v1/repository/quayadmin/busybox/tag/test/restore +---- ++ +.Example output ++ +[source,terminal] +---- +{} +---- + +. 
To see a list of tags after creating a new tag you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": false, "start_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715697708, "end_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:41:48 -0000", "expiration": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715695488, "end_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "expiration": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715631517, "end_ts": 1715695488, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Mon, 13 May 2024 20:18:37 -0000", "expiration": "Tue, 14 May 2024 14:04:48 -0000"}], "page": 1, "has_additional": false} +---- \ No 
newline at end of file diff --git a/modules/adding-a-new-tag-to-image.adoc b/modules/adding-a-new-tag-to-image.adoc new file mode 100644 index 000000000..fa350436b --- /dev/null +++ b/modules/adding-a-new-tag-to-image.adoc @@ -0,0 +1,23 @@ +:_content-type: CONCEPT +[id="adding-a-new-tag-to-image"] += Adding a new image tag to an image by using the UI + +You can add a new tag to an image in +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab, then click *Add new tag*. + +. Enter a name for the tag, then, click *Create tag*. ++ +The new tag is now listed on the *Repository Tags* page. \ No newline at end of file diff --git a/modules/adding-ca-certs-to-config.adoc b/modules/adding-ca-certs-to-config.adoc new file mode 100644 index 000000000..bcf8fb476 --- /dev/null +++ b/modules/adding-ca-certs-to-config.adoc @@ -0,0 +1,104 @@ +[id="adding-ca-certs-to-config"] += Adding additional Certificate Authorities to {productname-ocp} + +The following example shows you how to add additional Certificate Authorities to your {productname-ocp} deployment. + +.Prerequisites + +* You have base64 decoded the original config bundle into a `config.yaml` file. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-config-cli-download[Downloading the existing configuration]. +* You have a Certificate Authority (CA) file or files. + +.Procedure + +. Create a new YAML file, for example, `extra-ca-certificate-config-bundle-secret.yaml`: ++ +[source,terminal] +---- +$ touch extra-ca-certificate-config-bundle-secret.yaml +---- + +. Create the `extra-ca-certificate-config-bundle-secret` resource. + +.. 
Create the resource by entering the following command: ++ +[source,terminal] +---- +$ oc -n create secret generic extra-ca-certificate-config-bundle-secret \ + --from-file=config.yaml= \ <1> + --from-file=extra_ca_cert_= \ <2> + --from-file=extra_ca_cert_= \ <3> + --from-file=extra_ca_cert_= \ <4> + --dry-run=client -o yaml > extra-ca-certificate-config-bundle-secret.yaml +---- +<1> Where `` is your `base64 decoded` `config.yaml` file. +<2> The extra CA file to be added to into the system trust bundle. +<3> Optional. A second CA file to be added into the system trust bundle. +<4> Optional. A third CA file to be added into the system trust bundle. + +. Optional. You can check the content of the `extra-ca-certificate-config-bundle-secret.yaml` file by entering the following command: ++ +[source,terminal] +---- +$ cat extra-ca-certificate-config-bundle-secret.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +apiVersion: v1 +data: + config.yaml: QUxMT1dfUFVMTFNfV0lUSE9VVF9TVFJJQ1RfTE9HR0lORzogZmFsc2UKQVVUSEVOVElDQVRJT05fVFlQRTogRGF0YWJhc2UKREVGQVVMVF9UQUdfRVhQSVJBVElPTjogMncKUFJFRkVSU... + extra_ca_cert_certificate-one: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQyVENDQXNHZ0F3SUJBZ0lVS2xOai90VUJBZHBkNURjYkdRQUo4anRuKzd3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2ZERUxNQWtHQ... + extra_ca_cert_certificate-three: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ0ekNDQXN1Z0F3SUJBZ0lVQmJpTXNUeExjM0s4ODNWby9GTThsWXlOS2lFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2ZERUxNQWtHQ... + extra_ca_cert_certificate-two: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ0ekNDQXN1Z0F3SUJBZ0lVVFVPTXZ2YVdFOFRYV3djYTNoWlBCTnV2QjYwd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2ZERUxNQWtHQ... +kind: Secret +metadata: + creationTimestamp: null + name: custom-ssl-config-bundle-secret + namespace: +---- + +. 
Create the `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc create -n -f extra-ca-certificate-config-bundle-secret.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +secret/extra-ca-certificate-config-bundle-secret created +---- + +. Update the `QuayRegistry` YAML file to reference the `extra-ca-certificate-config-bundle-secret` object by entering the following command: ++ +[source,terminal] +---- +$ oc patch quayregistry -n --type=merge -p '{"spec":{"configBundleSecret":"extra-ca-certificate-config-bundle-secret"}}' +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry patched +---- + +. Ensure that your `QuayRegistry` YAML file has been updated to use the extra CA certificate `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml +---- ++ +.Example output ++ +[source,terminal] +---- +# ... + configBundleSecret: extra-ca-certificate-config-bundle-secret +# ... +---- \ No newline at end of file diff --git a/modules/adding-managing-labels-api.adoc b/modules/adding-managing-labels-api.adoc new file mode 100644 index 000000000..11505ead5 --- /dev/null +++ b/modules/adding-managing-labels-api.adoc @@ -0,0 +1,89 @@ +:_content-type: CONCEPT +[id="adding-managing-labels-api"] += Adding and managing labels by using the API + +{productname} administrators can add and manage labels for tags with the API by using the following procedure. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepomanifest[`GET /api/v1/repository/{repository}/manifest/{manifestref}`] command to retrieve the details of a specific manifest in a repository: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest/ +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listmanifestlabels[`GET /api/v1/repository/{repository}/manifest/{manifestref}/labels`] command to retrieve a list of labels for a specific manifest: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels +---- ++ +.Example output ++ +[source,terminal] +---- +{"labels": [{"id": "e9f717d2-c1dd-4626-802d-733a029d17ad", "key": "org.opencontainers.image.url", "value": "https://github.com/docker-library/busybox", "source_type": "manifest", "media_type": "text/plain"}, {"id": "2d34ec64-4051-43ad-ae06-d5f81003576a", "key": "org.opencontainers.image.version", "value": "1.36.1-glibc", "source_type": "manifest", "media_type": "text/plain"}]} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getmanifestlabel[`GET /api/v1/repository/{repository}/manifest/{manifestref}/labels/{labelid}`] command to obtain information about a specific manifest: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": "e9f717d2-c1dd-4626-802d-733a029d17ad", "key": "org.opencontainers.image.url", "value": "https://github.com/docker-library/busybox", "source_type": "manifest", "media_type": "text/plain"} +---- + +. You can add an additional label to a manifest in a given repository with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#addmanifestlabel[`POST /api/v1/repository/{repository}/manifest/{manifestref}/labels`] command. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "key": "", + "value": "", + "media_type": "" + }' \ + https:///api/v1/repository//manifest//labels +---- ++ +.Example output ++ +[source,terminal] +---- +{"label": {"id": "346593fd-18c8-49db-854f-4cb1fb76ff9c", "key": "example-key", "value": "example-value", "source_type": "api", "media_type": "text/plain"}} +---- + +. You can delete a label using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletemanifestlabel[`DELETE /api/v1/repository/{repository}/manifest/{manifestref}/labels/{labelid}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//manifest//labels/ +---- ++ +This command does not return output in the CLI. You can use one of the commands above to ensure that it was successfully removed. 
\ No newline at end of file diff --git a/modules/adding-managing-labels.adoc b/modules/adding-managing-labels.adoc new file mode 100644 index 000000000..8e85650a2 --- /dev/null +++ b/modules/adding-managing-labels.adoc @@ -0,0 +1,30 @@ +:_content-type: CONCEPT +[id="adding-managing-labels"] += Adding and managing labels by using the UI + +Administrators can add and manage labels for tags by using the following procedure. + +.Procedure + +. On the v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab for an image and select *Edit labels*. + +. In the *Edit labels* window, click *Add new label*. + +. Enter a label for the image tag using the `key=value` format, for example, `com.example.release-date=2023-11-14`. ++ +[NOTE] +==== +The following error is returned when failing to use the `key=value` format: `Invalid label format, must be key value separated by =`. +==== + +. Click the whitespace of the box to add the label. + +. Optional. Add a second label. + +. Click *Save labels* to save the label to the image tag. The following notification is returned: `Created labels successfully`. + +. Optional. Click the same image tag's menu kebab -> *Edit labels* -> *X* on the label to remove it; alternatively, you can edit the text. Click *Save labels*. The label is now removed or edited. \ No newline at end of file diff --git a/modules/adjust-access-user-repo-api.adoc b/modules/adjust-access-user-repo-api.adoc new file mode 100644 index 000000000..c26a11259 --- /dev/null +++ b/modules/adjust-access-user-repo-api.adoc @@ -0,0 +1,64 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="adjust-access-user-repo-api"] += Adjusting access settings for a repository by using the API + +Use the following procedure to adjust access settings for a user or robot account for a repository by using the API. 
+ +.Prerequisites + +* You have created a user account or robot account. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeuserpermissions[`PUT /api/v1/repository/{repository}/permissions/user/{username}`] command to change the permissions of a user: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": "admin"}' \ + https:///api/v1/repository///permissions/user/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "admin", "name": "quayadmin+test", "is_robot": true, "avatar": {"name": "quayadmin+test", "hash": "ca9afae0a9d3ca322fc8a7a866e8476dd6c98de543decd186ae090e420a88feb", "color": "#8c564b", "kind": "robot"}} +---- + +. To delete the current permission, you can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserpermissions[`DELETE /api/v1/repository/{repository}/permissions/user/{username}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user/ +---- ++ +This command does not return any output in the CLI. 
Instead, you can check that the permissions were deleted by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepouserpermissions[`GET /api/v1/repository/{repository}/permissions/user/`] command: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user// +---- ++ +.Example output ++ +[source,terminal] +---- +{"message":"User does not have permission for repo."} +---- \ No newline at end of file diff --git a/modules/adjusting-repository-access-via-the-api.adoc b/modules/adjusting-repository-access-via-the-api.adoc new file mode 100644 index 000000000..cb210ac8a --- /dev/null +++ b/modules/adjusting-repository-access-via-the-api.adoc @@ -0,0 +1,38 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="adjusting-image-repository-visibility-the-api"] += Adjusting repository visibility by using the API + +The visibility of your repository can be set to `private` or `public` by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/red_hat_quay_api_reference/index#changerepovisibility[`POST /api/v1/repository/{repository}/changevisibility`] command. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created a repository. + +.Procedure + +* You can change the visibility of your repository to public or private by specifying the desired option in the `visibility` schema. 
For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository/my_namespace/test_repo_three/changevisibility" +---- ++ +.Example output ++ +[source,terminal] +---- +{"success": true} +---- \ No newline at end of file diff --git a/modules/adjusting-repository-visibility-via-the-ui.adoc b/modules/adjusting-repository-visibility-via-the-ui.adoc new file mode 100644 index 000000000..e1efa09c3 --- /dev/null +++ b/modules/adjusting-repository-visibility-via-the-ui.adoc @@ -0,0 +1,22 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="adjusting-image-repository-visibility-the-ui"] += Adjusting repository visibility by using the UI + +You can adjust the visibility of a repository to make it either public or private by using the {productname} UI. + +.Procedure + +. On the {productname} UI, click *Repositories* in the navigation pane. + +. Click the name of a repository. + +. Click *Settings* in the navigation pane. + +. Click *Repository visibility*. + +. Click *Make private*. The repository is made private, and only users on the permissions list can view and interact with it. \ No newline at end of file diff --git a/modules/advanced-quay-poc-deployment.adoc b/modules/advanced-quay-poc-deployment.adoc new file mode 100644 index 000000000..c696eaf28 --- /dev/null +++ b/modules/advanced-quay-poc-deployment.adoc @@ -0,0 +1,5 @@ +:_content-type: PROCEDURE +[id="advanced-quay-poc-deployment"] += Proof of concept deployment using SSL/TLS certificates + +Use the following sections to configure a proof of concept {productname} deployment with SSL/TLS certificates. 
\ No newline at end of file diff --git a/modules/allow-access-user-repo.adoc b/modules/allow-access-user-repo.adoc new file mode 100644 index 000000000..9b88be878 --- /dev/null +++ b/modules/allow-access-user-repo.adoc @@ -0,0 +1,66 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="allow-access-user-repo"] += Adjusting access settings for a repository by using the UI + +Use the following procedure to adjust access settings for a user or robot account for a repository using the v2 UI. + +.Prerequisites + +* You have created a user account or robot account. + +.Procedure + +. Log into +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +ifeval::["{context}" == "quay-security"] +{productname}. +endif::[] + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository, for example, `quayadmin/busybox`. + +. Click the *Settings* tab. + +. Optional. Click *User and robot permissions*. You can adjust the settings for a user or robot account by clicking the dropdown menu option under *Permissions*. You can change the settings to *Read*, *Write*, or *Admin*. ++ +* *Read*. The User or Robot Account can view and pull from the repository. +* *Write*. The User or Robot Account can read (pull) from and write (push) to the repository. +* *Admin*. The User or Robot account has access to pull from, and push to, the repository, plus the ability to do administrative tasks associated with the repository. + +//// +. Optional. Click *Events and notifications*. You can create an event and notification by clicking *Create Notification*. The following event options are available: ++ +* Push to Repository +* Package Vulnerability Found +* Image build failed +* Image build queued +* Image build started +* Image build success +* Image build cancelled ++ +Then, issue a notification. 
The following options are available: ++ +* Email Notification +* Flowdock Team Notification +* HipChat Room Notification +* Slack Notification +* Webhook POST ++ +After selecting an event option and the method of notification, include a *Room ID #*, a *Room Notification Token*, then, click *Submit*. + +. Optional. Click *Repository visibility*. You can make the repository private, or public, by clicking *Make Public*. + +. Optional. Click *Delete repository*. You can delete the repository by clicking *Delete Repository*. +//// \ No newline at end of file diff --git a/modules/api-appspecifictokens-createAppToken.adoc b/modules/api-appspecifictokens-createAppToken.adoc index 7223dd3b2..305c85110 100644 --- a/modules/api-appspecifictokens-createAppToken.adoc +++ b/modules/api-appspecifictokens-createAppToken.adoc @@ -19,8 +19,8 @@ Description of a new token. [options="header", width=100%, cols=".^3a,.^9a,.^4a"] |=== |Name|Description|Schema -|**friendlyName** + -_optional_|Friendly name to help identify the token|string +|**title** + +_required_|Friendly name to help identify the token|string |=== @@ -36,3 +36,16 @@ _optional_|Friendly name to help identify the token|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- + $ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "title": "MyAppToken" + }' \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- diff --git a/modules/api-appspecifictokens-getAppToken.adoc b/modules/api-appspecifictokens-getAppToken.adoc index c117ef3fc..79b81bb50 100644 --- a/modules/api-appspecifictokens-getAppToken.adoc +++ b/modules/api-appspecifictokens-getAppToken.adoc @@ -33,3 +33,12 @@ _required_|The uuid of the app specific token|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] 
+---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- \ No newline at end of file diff --git a/modules/api-appspecifictokens-listAppTokens.adoc b/modules/api-appspecifictokens-listAppTokens.adoc index 92f78399f..7c0a737e3 100644 --- a/modules/api-appspecifictokens-listAppTokens.adoc +++ b/modules/api-appspecifictokens-listAppTokens.adoc @@ -34,3 +34,12 @@ _optional_|If true, only returns those tokens expiring soon|boolean |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- \ No newline at end of file diff --git a/modules/api-appspecifictokens-revokeAppToken.adoc b/modules/api-appspecifictokens-revokeAppToken.adoc index 8391e4a6d..9daa9a072 100644 --- a/modules/api-appspecifictokens-revokeAppToken.adoc +++ b/modules/api-appspecifictokens-revokeAppToken.adoc @@ -33,3 +33,12 @@ _required_|The uuid of the app specific token|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- \ No newline at end of file diff --git a/modules/api-build-cancelRepoBuild.adoc b/modules/api-build-cancelRepoBuild.adoc index 9275bbcb5..015cb8180 100644 --- a/modules/api-build-cancelRepoBuild.adoc +++ b/modules/api-build-cancelRepoBuild.adoc @@ -16,10 +16,10 @@ Cancels a repository build. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**build_uuid** + -_required_|The UUID of the build|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string |=== diff --git a/modules/api-build-getRepoBuild.adoc b/modules/api-build-getRepoBuild.adoc index 395e4c2ef..7f2a54946 100644 --- a/modules/api-build-getRepoBuild.adoc +++ b/modules/api-build-getRepoBuild.adoc @@ -16,10 +16,10 @@ Returns information about a build. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**build_uuid** + -_required_|The UUID of the build|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string |=== diff --git a/modules/api-build-getRepoBuildLogs.adoc b/modules/api-build-getRepoBuildLogs.adoc index 2074a29cc..818643a25 100644 --- a/modules/api-build-getRepoBuildLogs.adoc +++ b/modules/api-build-getRepoBuildLogs.adoc @@ -16,10 +16,10 @@ Return the build logs for the build specified by the build uuid. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**build_uuid** + -_required_|The UUID of the build|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string |=== diff --git a/modules/api-build-getRepoBuildStatus.adoc b/modules/api-build-getRepoBuildStatus.adoc index 9273ee235..3611a3f1e 100644 --- a/modules/api-build-getRepoBuildStatus.adoc +++ b/modules/api-build-getRepoBuildStatus.adoc @@ -16,10 +16,10 @@ Return the status for the builds specified by the build uuids. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**build_uuid** + -_required_|The UUID of the build|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string |=== diff --git a/modules/api-build-requestRepoBuild.adoc b/modules/api-build-requestRepoBuild.adoc index f11e65f11..4dffc2ea2 100644 --- a/modules/api-build-requestRepoBuild.adoc +++ b/modules/api-build-requestRepoBuild.adoc @@ -41,7 +41,7 @@ _optional_|Path to a dockerfile. You can only specify this or subdirectory.|stri _optional_|Pass in the context for the dockerfile. This is optional.|string |**pull_robot** + _optional_|Username of a Quay robot account to use as pull credentials|string -|**docker_tags** + +|**tags** + _optional_|The tags to which the built images will be pushed. If none specified, "latest" is used.|array of string + `non-empty` `unique` |=== diff --git a/modules/api-discovery-discovery.adoc b/modules/api-discovery-discovery.adoc index 4051b2abe..883f3db92 100644 --- a/modules/api-discovery-discovery.adoc +++ b/modules/api-discovery-discovery.adoc @@ -32,3 +32,12 @@ _optional_|Whether to include internal APIs.|boolean |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/discovery?query=true" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-error-getErrorDescription.adoc b/modules/api-error-getErrorDescription.adoc index 91cc69255..66f9b066e 100644 --- a/modules/api-error-getErrorDescription.adoc +++ b/modules/api-error-getErrorDescription.adoc @@ -31,3 +31,12 @@ _required_|The error code identifying the type of error.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/error/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-global-messages.adoc b/modules/api-global-messages.adoc new file mode 100644 index 
000000000..86ba49e3c --- /dev/null +++ b/modules/api-global-messages.adoc @@ -0,0 +1,55 @@ +:_content-type: PROCEDURE +[id="api-global-messages"] += Global messages + +Global messages can be created, obtained, or deleted by using the {productname} API. +Use the following procedure to create, obtain, or delete a global message. + +.Prerequisites + +* You have created an OAuth 2 access token. + +.Procedure + +. Create a message by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createglobalmessage[`POST /api/v1/message`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/messages" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "message": { + "content": "Hi", + "media_type": "text/plain", + "severity": "info" + } + }' +---- ++ +This command does not return output. + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getglobalmessages[`GET /api/v1/messages`] command to return the list of global messages: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/messages" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"messages": [{"uuid": "ecababd4-3451-4458-b5db-801684137444", "content": "Hi", "severity": "info", "media_type": "text/plain"}]} +---- + +. Delete the global message by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteglobalmessage[`DELETE /api/v1/message/{uuid}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/message/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. 
\ No newline at end of file diff --git a/modules/api-globalmessages-createGlobalMessage.adoc b/modules/api-globalmessages-createGlobalMessage.adoc index 1ad0fd71f..d3f1e451b 100644 --- a/modules/api-globalmessages-createGlobalMessage.adoc +++ b/modules/api-globalmessages-createGlobalMessage.adoc @@ -20,7 +20,7 @@ Create a new message |=== |Name|Description|Schema |**message** + -_optional_|A single message|object +_required_|A single message|object |=== @@ -36,3 +36,19 @@ _optional_|A single message|object |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST "https:///api/v1/messages" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "message": { + "content": "Hi", + "media_type": "text/plain", + "severity": "info" + } + }' +---- \ No newline at end of file diff --git a/modules/api-globalmessages-deleteGlobalMessage.adoc b/modules/api-globalmessages-deleteGlobalMessage.adoc index 387c8614f..656cd9b3c 100644 --- a/modules/api-globalmessages-deleteGlobalMessage.adoc +++ b/modules/api-globalmessages-deleteGlobalMessage.adoc @@ -33,3 +33,11 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/message/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-globalmessages-getGlobalMessages.adoc b/modules/api-globalmessages-getGlobalMessages.adoc index 7b50a8cc2..39c2319e1 100644 --- a/modules/api-globalmessages-getGlobalMessages.adoc +++ b/modules/api-globalmessages-getGlobalMessages.adoc @@ -21,3 +21,11 @@ Return a super users messages. 
|403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/messages" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-logs-exportOrgLogs.adoc b/modules/api-logs-exportOrgLogs.adoc index ef1e847e7..654d8d85c 100644 --- a/modules/api-logs-exportOrgLogs.adoc +++ b/modules/api-logs-exportOrgLogs.adoc @@ -61,3 +61,19 @@ _optional_|The e-mail address at which to e-mail a link to the exported logs|str |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "org.logs@example.com" + }' \ + "http:///api/v1/organization/{orgname}/exportlogs" +---- \ No newline at end of file diff --git a/modules/api-logs-exportRepoLogs.adoc b/modules/api-logs-exportRepoLogs.adoc index f33f4a12a..b34b59207 100644 --- a/modules/api-logs-exportRepoLogs.adoc +++ b/modules/api-logs-exportRepoLogs.adoc @@ -61,3 +61,19 @@ _optional_|The e-mail address at which to e-mail a link to the exported logs|str |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "2024-01-01", + "endtime": "2024-06-18", + "callback_url": "http://your-callback-url.example.com" + }' \ + "http:///api/v1/repository/{repository}/exportlogs" +---- \ No newline at end of file diff --git a/modules/api-logs-exportUserLogs.adoc b/modules/api-logs-exportUserLogs.adoc index 6b812117a..a21912db1 100644 --- a/modules/api-logs-exportUserLogs.adoc +++ 
b/modules/api-logs-exportUserLogs.adoc @@ -51,3 +51,19 @@ _optional_|The e-mail address at which to e-mail a link to the exported logs|str |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "your.email@example.com" + }' \ + "http:///api/v1/user/exportlogs" +---- \ No newline at end of file diff --git a/modules/api-logs-getAggregateOrgLogs.adoc b/modules/api-logs-getAggregateOrgLogs.adoc index 86bce807a..4693edca4 100644 --- a/modules/api-logs-getAggregateOrgLogs.adoc +++ b/modules/api-logs-getAggregateOrgLogs.adoc @@ -48,3 +48,13 @@ _optional_|Earliest time for logs. Format: "%m/%d/%Y" in UTC.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/organization/{orgname}/aggregatelogs" +---- diff --git a/modules/api-logs-getAggregateRepoLogs.adoc b/modules/api-logs-getAggregateRepoLogs.adoc index c73fc4f9d..993d71b0b 100644 --- a/modules/api-logs-getAggregateRepoLogs.adoc +++ b/modules/api-logs-getAggregateRepoLogs.adoc @@ -46,3 +46,13 @@ _optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/repository///aggregatelogs?starttime=2024-01-01&endtime=2024-06-18"" +---- \ No newline at end of file diff --git a/modules/api-logs-getAggregateUserLogs.adoc b/modules/api-logs-getAggregateUserLogs.adoc index 72cc5e11b..6fe558bc7 100644 --- a/modules/api-logs-getAggregateUserLogs.adoc +++ b/modules/api-logs-getAggregateUserLogs.adoc @@ -38,3 +38,13 @@ _optional_|Earliest time for logs. Format: "%m/%d/%Y" in UTC.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/user/aggregatelogs?performer=&starttime=&endtime=" +---- \ No newline at end of file diff --git a/modules/api-logs-listOrgLogs.adoc b/modules/api-logs-listOrgLogs.adoc index 2e78fc7dd..5ea0b177f 100644 --- a/modules/api-logs-listOrgLogs.adoc +++ b/modules/api-logs-listOrgLogs.adoc @@ -50,3 +50,13 @@ _optional_|Earliest time for logs. Format: "%m/%d/%Y" in UTC.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/organization/{orgname}/logs" +---- \ No newline at end of file diff --git a/modules/api-logs-listRepoLogs.adoc b/modules/api-logs-listRepoLogs.adoc index 18d9462f1..45c032744 100644 --- a/modules/api-logs-listRepoLogs.adoc +++ b/modules/api-logs-listRepoLogs.adoc @@ -48,3 +48,13 @@ _optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/repository/{repository}/logs" +---- \ No newline at end of file diff --git a/modules/api-logs-listUserLogs.adoc b/modules/api-logs-listUserLogs.adoc index 622b98cb4..27d68f5aa 100644 --- a/modules/api-logs-listUserLogs.adoc +++ b/modules/api-logs-listUserLogs.adoc @@ -40,3 +40,10 @@ _optional_|Earliest time for logs. Format: "%m/%d/%Y" in UTC.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "/api/v1/user/logs" +---- \ No newline at end of file diff --git a/modules/api-manifest-addManifestLabel.adoc b/modules/api-manifest-addManifestLabel.adoc index 75ace96b0..8ca4c9fe1 100644 --- a/modules/api-manifest-addManifestLabel.adoc +++ b/modules/api-manifest-addManifestLabel.adoc @@ -16,10 +16,10 @@ Adds a new label into the tag manifest. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**manifestref** + -_required_|The digest of the manifest|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string |=== @@ -32,11 +32,11 @@ Adds a label to a manifest |=== |Name|Description|Schema |**key** + -_optional_|The key for the label|string +_required_|The key for the label|string |**value** + -_optional_|The value for the label|string +_required_|The value for the label|string |**media_type** + -_optional_|The media type for this label| +_required_|The media type for this label| |=== @@ -52,3 +52,18 @@ _optional_|The media type for this label| |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "key": "", + "value": "", + "media_type": "" + }' \ + https:///api/v1/repository//manifest//labels +---- \ No newline at end of file diff --git a/modules/api-manifest-deleteManifestLabel.adoc b/modules/api-manifest-deleteManifestLabel.adoc index 08fca4498..2567cd29f 100644 --- a/modules/api-manifest-deleteManifestLabel.adoc +++ b/modules/api-manifest-deleteManifestLabel.adoc @@ -16,12 +16,12 @@ Deletes an existing label from a manifest. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |path|**manifestref** + _required_|The digest of the manifest|string |path|**labelid** + _required_|The ID of the label|string -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -37,3 +37,12 @@ _required_|The full path of the repository. e.g. 
namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//manifest//labels/ +---- \ No newline at end of file diff --git a/modules/api-manifest-getManifestLabel.adoc b/modules/api-manifest-getManifestLabel.adoc index 28b4a716b..b06777204 100644 --- a/modules/api-manifest-getManifestLabel.adoc +++ b/modules/api-manifest-getManifestLabel.adoc @@ -16,12 +16,12 @@ Retrieves the label with the specific ID under the manifest. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |path|**manifestref** + _required_|The digest of the manifest|string |path|**labelid** + _required_|The ID of the label|string -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -37,3 +37,13 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels/ +---- \ No newline at end of file diff --git a/modules/api-manifest-getRepoManifest.adoc b/modules/api-manifest-getRepoManifest.adoc index 291d6ca4a..7fb537feb 100644 --- a/modules/api-manifest-getRepoManifest.adoc +++ b/modules/api-manifest-getRepoManifest.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**manifestref** + -_required_|The digest of the manifest|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string |=== @@ -35,3 +35,13 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest/ +---- \ No newline at end of file diff --git a/modules/api-manifest-listManifestLabels.adoc b/modules/api-manifest-listManifestLabels.adoc index 9c153e05c..99f8b6870 100644 --- a/modules/api-manifest-listManifestLabels.adoc +++ b/modules/api-manifest-listManifestLabels.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**manifestref** + -_required_|The digest of the manifest|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string |=== @@ -46,3 +46,13 @@ _optional_|If specified, only labels matching the given prefix will be returned| |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels +---- \ No newline at end of file diff --git a/modules/api-mirror-changeRepoMirrorConfig.adoc b/modules/api-mirror-changeRepoMirrorConfig.adoc index 8af4d44b7..08a608c46 100644 --- a/modules/api-mirror-changeRepoMirrorConfig.adoc +++ b/modules/api-mirror-changeRepoMirrorConfig.adoc @@ -56,9 +56,33 @@ _optional_||object [options="header", width=100%, cols=".^2a,.^14a,.^4a"] |=== |HTTP Code|Description|Schema -|200|Successful invocation| +|201|Successful invocation| |400|Bad Request|<<_apierror,ApiError>> |401|Session 
required|<<_apierror,ApiError>> |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , <1> + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- +<1> Disables automatic synchronization. \ No newline at end of file diff --git a/modules/api-mirror-createRepoMirrorConfig.adoc b/modules/api-mirror-createRepoMirrorConfig.adoc index 35f1d3de2..b5a61b5d2 100644 --- a/modules/api-mirror-createRepoMirrorConfig.adoc +++ b/modules/api-mirror-createRepoMirrorConfig.adoc @@ -32,19 +32,19 @@ Create the repository mirroring configuration. |**is_enabled** + _optional_|Used to enable or disable synchronizations.|boolean |**external_reference** + -_optional_|Location of the external repository.|string +_required_|Location of the external repository.|string |**external_registry_username** + _optional_|Username used to authenticate with external registry.| |**external_registry_password** + _optional_|Password used to authenticate with external registry.| |**sync_start_date** + -_optional_|Determines the next time this repository is ready for synchronization.|string +_required_|Determines the next time this repository is ready for synchronization.|string |**sync_interval** + -_optional_|Number of seconds after next_start_date to begin synchronizing.|integer +_required_|Number of seconds after next_start_date to begin synchronizing.|integer |**robot_username** + -_optional_|Username of robot which will be used for image pushes.|string +_required_|Username of robot which will be used for image pushes.|string |**root_rule** + -_optional_|A list of glob-patterns used 
to determine which tags should be synchronized.|object +_required_|A list of glob-patterns used to determine which tags should be synchronized.|object |**external_registry_config** + _optional_||object |=== @@ -62,3 +62,26 @@ _optional_||object |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- \ No newline at end of file diff --git a/modules/api-mirror-getRepoMirrorConfig.adoc b/modules/api-mirror-getRepoMirrorConfig.adoc index fb986e93b..6baf05bf4 100644 --- a/modules/api-mirror-getRepoMirrorConfig.adoc +++ b/modules/api-mirror-getRepoMirrorConfig.adoc @@ -33,3 +33,12 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-mirror-syncCancel.adoc b/modules/api-mirror-syncCancel.adoc index c18c403b1..8c4b879d1 100644 --- a/modules/api-mirror-syncCancel.adoc +++ b/modules/api-mirror-syncCancel.adoc @@ -33,3 +33,11 @@ _required_|The full path of the repository. e.g. 
namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-cancel" +---- \ No newline at end of file diff --git a/modules/api-mirror-syncNow.adoc b/modules/api-mirror-syncNow.adoc index 36492fca3..b2d20c7dc 100644 --- a/modules/api-mirror-syncNow.adoc +++ b/modules/api-mirror-syncNow.adoc @@ -33,3 +33,12 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-now" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-changeOrganizationQuota.adoc b/modules/api-namespacequota-changeOrganizationQuota.adoc index 78653f6ca..82fdc3476 100644 --- a/modules/api-namespacequota-changeOrganizationQuota.adoc +++ b/modules/api-namespacequota-changeOrganizationQuota.adoc @@ -33,6 +33,10 @@ Description of a new organization quota |Name|Description|Schema |**limit_bytes** + _optional_|Number of bytes the organization is allowed|integer + +|**limits** + +_optional_|Human readable storage capacity of the organization. Accepts SI units like Mi, Gi, or Ti, as well as non-standard units like GB or MB. 
Must be mutually exclusive with `limit_bytes`.|string + |=== @@ -48,3 +52,17 @@ _optional_|Number of bytes the organization is allowed|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc b/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc index 2fdc718ea..c75b4ba41 100644 --- a/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc +++ b/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc @@ -52,3 +52,18 @@ _optional_|Quota threshold, in percent of quota|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "type": "", + "threshold_percent": + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-createOrganizationQuota.adoc b/modules/api-namespacequota-createOrganizationQuota.adoc index 11b0cfdd8..f5c59bcc9 100644 --- a/modules/api-namespacequota-createOrganizationQuota.adoc +++ b/modules/api-namespacequota-createOrganizationQuota.adoc @@ -30,7 +30,10 @@ Description of a new organization quota |=== |Name|Description|Schema |**limit_bytes** + -_optional_|Number of bytes the organization is allowed|integer +_required_|Number of bytes the organization is allowed|integer + +|**limits** + +_optional_|Human readable storage capacity of the organization. Accepts SI units like Mi, Gi, or Ti, as well as non-standard units like GB or MB. 
Must be mutually exclusive with `limit_bytes`.|string |=== @@ -46,3 +49,18 @@ _optional_|Number of bytes the organization is allowed|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240, + "limits": "10 Gi" + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-createOrganizationQuotaLimit.adoc b/modules/api-namespacequota-createOrganizationQuotaLimit.adoc index 9ce2eefc5..a3fccfd49 100644 --- a/modules/api-namespacequota-createOrganizationQuotaLimit.adoc +++ b/modules/api-namespacequota-createOrganizationQuotaLimit.adoc @@ -32,9 +32,9 @@ Description of a new organization quota limit |=== |Name|Description|Schema |**type** + -_optional_|Type of quota limit: "Warning" or "Reject"|string +_required_|Type of quota limit: "Warning" or "Reject"|string |**threshold_percent** + -_optional_|Quota threshold, in percent of quota|integer +_required_|Quota threshold, in percent of quota|integer |=== @@ -50,3 +50,19 @@ _optional_|Quota threshold, in percent of quota|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 21474836480, + "type": "Reject", <1> + "threshold_percent": 90 <2> + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-deleteOrganizationQuota.adoc b/modules/api-namespacequota-deleteOrganizationQuota.adoc index 6f3947aac..8198db994 100644 --- a/modules/api-namespacequota-deleteOrganizationQuota.adoc +++ b/modules/api-namespacequota-deleteOrganizationQuota.adoc @@ -35,3 +35,13 @@ 
_required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc b/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc index afdaf6f93..a413f0f67 100644 --- a/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc +++ b/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc @@ -37,3 +37,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-getOrganizationQuota.adoc b/modules/api-namespacequota-getOrganizationQuota.adoc index 51d24f732..3e5797264 100644 --- a/modules/api-namespacequota-getOrganizationQuota.adoc +++ b/modules/api-namespacequota-getOrganizationQuota.adoc @@ -33,3 +33,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-getOrganizationQuotaLimit.adoc b/modules/api-namespacequota-getOrganizationQuotaLimit.adoc index 21fe0a6b5..5dceece76 100644 --- a/modules/api-namespacequota-getOrganizationQuotaLimit.adoc +++ b/modules/api-namespacequota-getOrganizationQuotaLimit.adoc @@ -35,3 +35,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ 
curl -X GET "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-getUserQuota.adoc b/modules/api-namespacequota-getUserQuota.adoc index e5c82f164..f25452fdf 100644 --- a/modules/api-namespacequota-getUserQuota.adoc +++ b/modules/api-namespacequota-getUserQuota.adoc @@ -33,3 +33,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-getUserQuotaLimit.adoc b/modules/api-namespacequota-getUserQuotaLimit.adoc index 97c8e7b73..43af0057c 100644 --- a/modules/api-namespacequota-getUserQuotaLimit.adoc +++ b/modules/api-namespacequota-getUserQuotaLimit.adoc @@ -35,3 +35,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit/{limit_id}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listOrganizationQuota.adoc b/modules/api-namespacequota-listOrganizationQuota.adoc index f473d3ac2..ad266dab5 100644 --- a/modules/api-namespacequota-listOrganizationQuota.adoc +++ b/modules/api-namespacequota-listOrganizationQuota.adoc @@ -31,3 +31,12 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https:///api/v1/organization//quota +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listOrganizationQuotaLimit.adoc 
b/modules/api-namespacequota-listOrganizationQuotaLimit.adoc index 6869fe27b..5fb7eacc1 100644 --- a/modules/api-namespacequota-listOrganizationQuotaLimit.adoc +++ b/modules/api-namespacequota-listOrganizationQuotaLimit.adoc @@ -33,3 +33,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listUserQuota.adoc b/modules/api-namespacequota-listUserQuota.adoc index 0fd79addd..32e3a6314 100644 --- a/modules/api-namespacequota-listUserQuota.adoc +++ b/modules/api-namespacequota-listUserQuota.adoc @@ -23,3 +23,12 @@ |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listUserQuotaLimit.adoc b/modules/api-namespacequota-listUserQuotaLimit.adoc index b49c74eb1..257e5b0c6 100644 --- a/modules/api-namespacequota-listUserQuotaLimit.adoc +++ b/modules/api-namespacequota-listUserQuotaLimit.adoc @@ -33,3 +33,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-changeOrganizationDetails.adoc b/modules/api-organization-changeOrganizationDetails.adoc index 06632af29..cae47e80e 100644 --- a/modules/api-organization-changeOrganizationDetails.adoc +++ b/modules/api-organization-changeOrganizationDetails.adoc @@ -52,3 +52,12 @@ _optional_|The number of seconds for tag 
expiration|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- \ No newline at end of file diff --git a/modules/api-organization-createOrganization.adoc b/modules/api-organization-createOrganization.adoc index 7e748147d..19db938ca 100644 --- a/modules/api-organization-createOrganization.adoc +++ b/modules/api-organization-createOrganization.adoc @@ -20,7 +20,7 @@ Description of a new organization. |=== |Name|Description|Schema |**name** + -_optional_|Organization username|string +_required_|Organization username|string |**email** + _optional_|Organization contact email|string |**recaptcha_response** + @@ -40,3 +40,14 @@ _optional_|The (may be disabled) recaptcha response code for verification|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "name": "" + }' "https:///api/v1/organization/" +---- diff --git a/modules/api-organization-createOrganizationApplication.adoc b/modules/api-organization-createOrganizationApplication.adoc index fa4a4d720..2e4e2f258 100644 --- a/modules/api-organization-createOrganizationApplication.adoc +++ b/modules/api-organization-createOrganizationApplication.adoc @@ -16,7 +16,7 @@ Creates a new application under this organization. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + +|path|**orgname** + _required_|The name of the organization|string |=== @@ -29,15 +29,15 @@ Description of a new organization application. 
[options="header", width=100%, cols=".^3a,.^9a,.^4a"] |=== |Name|Description|Schema -|**name** + +|**name** + _required_|The name of the application|string -|**redirect_uri** + -_required_|The URI for the application's OAuth redirect|string -|**application_uri** + -_required_|The URI for the application's homepage|string -|**description** + +|**redirect_uri** + +_optional_|The URI for the application's OAuth redirect|string +|**application_uri** + +_optional_|The URI for the application's homepage|string +|**description** + _optional_|The human-readable description for the application|string -|**avatar_email** + +|**avatar_email** + _optional_|The e-mail address of the avatar to use for the application|string |=== @@ -54,3 +54,19 @@ _optional_|The e-mail address of the avatar to use for the application|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "redirect_uri": "", + "application_uri": "", + "description": "", + "avatar_email": "" + }' +---- \ No newline at end of file diff --git a/modules/api-organization-createProxyCacheConfig.adoc b/modules/api-organization-createProxyCacheConfig.adoc index ff45b0da9..d9ce1787b 100644 --- a/modules/api-organization-createProxyCacheConfig.adoc +++ b/modules/api-organization-createProxyCacheConfig.adoc @@ -28,7 +28,7 @@ Proxy cache configuration for an organization |=== |Name|Description|Schema |**upstream_registry** + -_optional_|Name of the upstream registry that is to be cached|string +_required_|Name of the upstream registry that is to be cached|string |=== @@ -44,3 +44,16 @@ _optional_|Name of the upstream registry that is to be cached|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example 
command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//proxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' +---- \ No newline at end of file diff --git a/modules/api-organization-deleteAdminedOrganization.adoc b/modules/api-organization-deleteAdminedOrganization.adoc index b070361df..f761a074a 100644 --- a/modules/api-organization-deleteAdminedOrganization.adoc +++ b/modules/api-organization-deleteAdminedOrganization.adoc @@ -33,3 +33,13 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- diff --git a/modules/api-organization-deleteOrganizationApplication.adoc b/modules/api-organization-deleteOrganizationApplication.adoc index bb657ea9f..23bd6b348 100644 --- a/modules/api-organization-deleteOrganizationApplication.adoc +++ b/modules/api-organization-deleteOrganizationApplication.adoc @@ -35,3 +35,11 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization/{orgname}/applications/{client_id}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-deleteProxyCacheConfig.adoc b/modules/api-organization-deleteProxyCacheConfig.adoc index 924db6353..03ee88965 100644 --- a/modules/api-organization-deleteProxyCacheConfig.adoc +++ b/modules/api-organization-deleteProxyCacheConfig.adoc @@ -31,3 +31,11 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE 
"https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getApplicationInformation.adoc b/modules/api-organization-getApplicationInformation.adoc index d9d60b546..355bd9621 100644 --- a/modules/api-organization-getApplicationInformation.adoc +++ b/modules/api-organization-getApplicationInformation.adoc @@ -31,3 +31,12 @@ _required_|The OAuth client ID|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/app/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganization.adoc b/modules/api-organization-getOrganization.adoc index 05f617eb9..e42557800 100644 --- a/modules/api-organization-getOrganization.adoc +++ b/modules/api-organization-getOrganization.adoc @@ -31,3 +31,13 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- diff --git a/modules/api-organization-getOrganizationApplication.adoc b/modules/api-organization-getOrganizationApplication.adoc index 99a651df7..acbde91f2 100644 --- a/modules/api-organization-getOrganizationApplication.adoc +++ b/modules/api-organization-getOrganizationApplication.adoc @@ -35,3 +35,11 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationApplications.adoc 
b/modules/api-organization-getOrganizationApplications.adoc index e9f6ce8cb..0fed27fd1 100644 --- a/modules/api-organization-getOrganizationApplications.adoc +++ b/modules/api-organization-getOrganizationApplications.adoc @@ -33,3 +33,12 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationCollaborators.adoc b/modules/api-organization-getOrganizationCollaborators.adoc index 7d49bac15..8781212e7 100644 --- a/modules/api-organization-getOrganizationCollaborators.adoc +++ b/modules/api-organization-getOrganizationCollaborators.adoc @@ -33,3 +33,11 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/collaborators" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationMember.adoc b/modules/api-organization-getOrganizationMember.adoc index 32a0dc9ec..a18013b89 100644 --- a/modules/api-organization-getOrganizationMember.adoc +++ b/modules/api-organization-getOrganizationMember.adoc @@ -16,10 +16,10 @@ Retrieves the details of a member of the organization. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**membername** + _required_|The username of the organization member|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -35,3 +35,12 @@ _required_|The username of the organization member|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationMembers.adoc b/modules/api-organization-getOrganizationMembers.adoc index ae45b7b69..8064dcdfb 100644 --- a/modules/api-organization-getOrganizationMembers.adoc +++ b/modules/api-organization-getOrganizationMembers.adoc @@ -33,3 +33,11 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getProxyCacheConfig.adoc b/modules/api-organization-getProxyCacheConfig.adoc index d39f89456..b4538da59 100644 --- a/modules/api-organization-getProxyCacheConfig.adoc +++ b/modules/api-organization-getProxyCacheConfig.adoc @@ -31,3 +31,12 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-removeOrganizationMember.adoc 
b/modules/api-organization-removeOrganizationMember.adoc index cddbebc45..1da46ab3e 100644 --- a/modules/api-organization-removeOrganizationMember.adoc +++ b/modules/api-organization-removeOrganizationMember.adoc @@ -17,10 +17,10 @@ Removes a member from an organization, revoking all its repository priviledges a [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**membername** + _required_|The username of the organization member|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -36,3 +36,12 @@ _required_|The username of the organization member|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-updateOrganizationApplication.adoc b/modules/api-organization-updateOrganizationApplication.adoc index e88462352..bd4eda0dd 100644 --- a/modules/api-organization-updateOrganizationApplication.adoc +++ b/modules/api-organization-updateOrganizationApplication.adoc @@ -16,9 +16,9 @@ Updates an application under this organization. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**client_id** + +|path|**client_id** + _required_|The OAuth client ID|string -|path|**orgname** + +|path|**orgname** + _required_|The name of the organization|string |=== @@ -31,15 +31,15 @@ Description of an updated application. 
[options="header", width=100%, cols=".^3a,.^9a,.^4a"] |=== |Name|Description|Schema -|**name** + +|**name** + _required_|The name of the application|string -|**redirect_uri** + +|**redirect_uri** + _required_|The URI for the application's OAuth redirect|string -|**application_uri** + +|**application_uri** + _required_|The URI for the application's homepage|string -|**description** + +|**description** + _optional_|The human-readable description for the application|string -|**avatar_email** + +|**avatar_email** + _optional_|The e-mail address of the avatar to use for the application|string |=== @@ -56,3 +56,19 @@ _optional_|The e-mail address of the avatar to use for the application|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/organization/test/applications/12345" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Updated Application Name", + "redirect_uri": "https://example.com/oauth/callback", + "application_uri": "https://example.com", + "description": "Updated description for the application", + "avatar_email": "avatar@example.com" + }' +---- \ No newline at end of file diff --git a/modules/api-organization-validateProxyCacheConfig.adoc b/modules/api-organization-validateProxyCacheConfig.adoc index a25f45e77..fb1332f1e 100644 --- a/modules/api-organization-validateProxyCacheConfig.adoc +++ b/modules/api-organization-validateProxyCacheConfig.adoc @@ -28,7 +28,7 @@ Proxy cache configuration for an organization |=== |Name|Description|Schema |**upstream_registry** + -_optional_|Name of the upstream registry that is to be cached|string +_required_|Name of the upstream registry that is to be cached|string |=== @@ -38,9 +38,22 @@ _optional_|Name of the upstream registry that is to be cached|string [options="header", width=100%, cols=".^2a,.^14a,.^4a"] |=== |HTTP 
Code|Description|Schema -|201|Successful creation| +|202|Successful creation| |400|Bad Request|<<_apierror,ApiError>> |401|Session required|<<_apierror,ApiError>> |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization/{orgname}/validateproxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' + +---- \ No newline at end of file diff --git a/modules/api-permission-changeTeamPermissions.adoc b/modules/api-permission-changeTeamPermissions.adoc index 7ca4bd386..5fb525b90 100644 --- a/modules/api-permission-changeTeamPermissions.adoc +++ b/modules/api-permission-changeTeamPermissions.adoc @@ -16,10 +16,10 @@ Update the existing team permission. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**teamname** + -_required_|The name of the team to which the permission applies|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**teamname** + +_required_|The name of the team to which the permission applies|string |=== @@ -32,7 +32,7 @@ Description of a team permission. 
|=== |Name|Description|Schema |**role** + -_optional_|Role to use for the team|string +_required_|Role to use for the team|string |=== @@ -48,3 +48,12 @@ _optional_|Role to use for the team|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": ""}' \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-changeUserPermissions.adoc b/modules/api-permission-changeUserPermissions.adoc index e987ca443..03b6a8736 100644 --- a/modules/api-permission-changeUserPermissions.adoc +++ b/modules/api-permission-changeUserPermissions.adoc @@ -16,10 +16,10 @@ Update the perimssions for an existing repository. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**username** + _required_|The username of the user to which the permission applies|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -32,7 +32,7 @@ Description of a user permission. 
|=== |Name|Description|Schema |**role** + -_optional_|Role to use for the user|string +_required_|Role to use for the user|string |=== @@ -48,3 +48,15 @@ _optional_|Role to use for the user|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": "admin"}' \ + https:///api/v1/repository///permissions/user/ +---- \ No newline at end of file diff --git a/modules/api-permission-deleteTeamPermissions.adoc b/modules/api-permission-deleteTeamPermissions.adoc index d298f7886..c6efaa201 100644 --- a/modules/api-permission-deleteTeamPermissions.adoc +++ b/modules/api-permission-deleteTeamPermissions.adoc @@ -16,10 +16,10 @@ Delete the permission for the specified team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**teamname** + -_required_|The name of the team to which the permission applies|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**teamname** + +_required_|The name of the team to which the permission applies|string |=== @@ -35,3 +35,13 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-deleteUserPermissions.adoc b/modules/api-permission-deleteUserPermissions.adoc index 810a83d37..7ecbc0d94 100644 --- a/modules/api-permission-deleteUserPermissions.adoc +++ b/modules/api-permission-deleteUserPermissions.adoc @@ -16,10 +16,10 @@ Delete the permission for the user. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**username** + _required_|The username of the user to which the permission applies|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -35,3 +35,13 @@ _required_|The username of the user to which the permission applies|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user/ +---- \ No newline at end of file diff --git a/modules/api-permission-getTeamPermissions.adoc b/modules/api-permission-getTeamPermissions.adoc index 4961d5f85..7f27a1301 100644 --- a/modules/api-permission-getTeamPermissions.adoc +++ b/modules/api-permission-getTeamPermissions.adoc @@ -16,10 +16,10 @@ Fetch the permission for the specified team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**teamname** + -_required_|The name of the team to which the permission applies|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**teamname** + +_required_|The name of the team to which the permission applies|string |=== @@ -35,3 +35,13 @@ _required_|The full path of the repository. e.g. 
namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-getUserPermissions.adoc b/modules/api-permission-getUserPermissions.adoc index f3ec07e6c..190e135d1 100644 --- a/modules/api-permission-getUserPermissions.adoc +++ b/modules/api-permission-getUserPermissions.adoc @@ -16,10 +16,10 @@ Get the permission for the specified user. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**username** + _required_|The username of the user to which the permission applies|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -35,3 +35,13 @@ _required_|The username of the user to which the permission applies|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user/" +---- \ No newline at end of file diff --git a/modules/api-permission-getUserTransitivePermission.adoc b/modules/api-permission-getUserTransitivePermission.adoc index a41420846..88a504de7 100644 --- a/modules/api-permission-getUserTransitivePermission.adoc +++ b/modules/api-permission-getUserTransitivePermission.adoc @@ -16,10 +16,10 @@ Get the fetch the permission for the specified user. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. 
namespace/name|string |path|**username** + _required_|The username of the user to which the permissions apply|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -35,3 +35,13 @@ _required_|The username of the user to which the permissions apply|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user//transitive" +---- \ No newline at end of file diff --git a/modules/api-permission-listRepoTeamPermissions.adoc b/modules/api-permission-listRepoTeamPermissions.adoc index 795bca0e4..6d08c69b4 100644 --- a/modules/api-permission-listRepoTeamPermissions.adoc +++ b/modules/api-permission-listRepoTeamPermissions.adoc @@ -33,3 +33,12 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-listRepoUserPermissions.adoc b/modules/api-permission-listRepoUserPermissions.adoc index f79961089..941b4a76a 100644 --- a/modules/api-permission-listRepoUserPermissions.adoc +++ b/modules/api-permission-listRepoUserPermissions.adoc @@ -33,3 +33,14 @@ _required_|The full path of the repository. e.g. 
namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user// +---- \ No newline at end of file diff --git a/modules/api-policy-createOrganizationAutoPrunePolicy.adoc b/modules/api-policy-createOrganizationAutoPrunePolicy.adoc new file mode 100644 index 000000000..77d269fd1 --- /dev/null +++ b/modules/api-policy-createOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,62 @@ + += createOrganizationAutoPrunePolicy +Creates an auto-prune policy for the organization + +[discrete] +== POST /api/v1/organization/{orgname}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/organization//autoprunepolicy/ +---- \ No newline at end of file diff --git a/modules/api-policy-createRepositoryAutoPrunePolicy.adoc b/modules/api-policy-createRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..790a488e5 --- /dev/null +++ b/modules/api-policy-createRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,62 @@ + += createRepositoryAutoPrunePolicy +Creates an auto-prune policy for the repository + +[discrete] +== POST /api/v1/repository/{repository}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' http:///api/v1/repository///autoprunepolicy/ +---- \ No newline at end of file diff --git a/modules/api-policy-createUserAutoPrunePolicy.adoc b/modules/api-policy-createUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..da859db8e --- /dev/null +++ b/modules/api-policy-createUserAutoPrunePolicy.adoc @@ -0,0 +1,60 @@ + += createUserAutoPrunePolicy +Creates the auto-prune policy for the currently logged in user + +[discrete] +== POST /api/v1/user/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": 10, + "tagPattern": "v*", + "tagPatternMatches": true + }' +---- \ No newline at end of file diff --git a/modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc b/modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc new file mode 100644 index 000000000..baa6f7802 --- /dev/null +++ b/modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,46 @@ + += deleteOrganizationAutoPrunePolicy +Deletes the auto-prune policy for the organization + +[discrete] +== DELETE /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not 
found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/organization/example_org/autoprunepolicy/example_policy_uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc b/modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..326fcda93 --- /dev/null +++ b/modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,46 @@ + += deleteRepositoryAutoPrunePolicy +Deletes the auto-prune policy for the repository + +[discrete] +== DELETE /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/autoprunepolicy/123e4567-e89b-12d3-a456-426614174000" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-deleteUserAutoPrunePolicy.adoc b/modules/api-policy-deleteUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..2d3f5c3af --- /dev/null +++ b/modules/api-policy-deleteUserAutoPrunePolicy.adoc @@ -0,0 +1,44 @@ + += deleteUserAutoPrunePolicy +Deletes the auto-prune policy for the currently logged in user + +[discrete] +== DELETE /api/v1/user/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-getOrganizationAutoPrunePolicy.adoc b/modules/api-policy-getOrganizationAutoPrunePolicy.adoc new file mode 100644 index 
000000000..ea4db2a8a --- /dev/null +++ b/modules/api-policy-getOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,45 @@ + += getOrganizationAutoPrunePolicy +Fetches the auto-prune policy for the organization + +[discrete] +== GET /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/ +---- \ No newline at end of file diff --git a/modules/api-policy-getRepositoryAutoPrunePolicy.adoc b/modules/api-policy-getRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..4a4dc6a8b --- /dev/null +++ b/modules/api-policy-getRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,46 @@ + += getRepositoryAutoPrunePolicy +Fetches the auto-prune policy for the repository + +[discrete] +== GET /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/autoprunepolicy/123e4567-e89b-12d3-a456-426614174000" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-getUserAutoPrunePolicy.adoc b/modules/api-policy-getUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..d7bc517af --- /dev/null +++ b/modules/api-policy-getUserAutoPrunePolicy.adoc @@ -0,0 +1,44 @@ + += getUserAutoPrunePolicy +Fetches the auto-prune policy for the currently logged in user + +[discrete] +== GET /api/v1/user/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/autoprunepolicy/{policy_uuid}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-listOrganizationAutoPrunePolicies.adoc b/modules/api-policy-listOrganizationAutoPrunePolicies.adoc new 
file mode 100644 index 000000000..ee4603864 --- /dev/null +++ b/modules/api-policy-listOrganizationAutoPrunePolicies.adoc @@ -0,0 +1,44 @@ + += listOrganizationAutoPrunePolicies +Lists the auto-prune policies for the organization + +[discrete] +== GET /api/v1/organization/{orgname}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/organization/example_org/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-listRepositoryAutoPrunePolicies.adoc b/modules/api-policy-listRepositoryAutoPrunePolicies.adoc new file mode 100644 index 000000000..7bc2470e5 --- /dev/null +++ b/modules/api-policy-listRepositoryAutoPrunePolicies.adoc @@ -0,0 +1,44 @@ + += listRepositoryAutoPrunePolicies +Lists the auto-prune policies for the repository + +[discrete] +== GET /api/v1/repository/{repository}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-listUserAutoPrunePolicies.adoc b/modules/api-policy-listUserAutoPrunePolicies.adoc new file mode 100644 index 000000000..2e1cadf35 --- /dev/null +++ b/modules/api-policy-listUserAutoPrunePolicies.adoc @@ -0,0 +1,34 @@ + += listUserAutoPrunePolicies +Lists the auto-prune policies for the currently logged in user + +[discrete] +== GET /api/v1/user/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-updateOrganizationAutoPrunePolicy.adoc b/modules/api-policy-updateOrganizationAutoPrunePolicy.adoc new file mode 100644 index 000000000..f9f7dd659 --- /dev/null +++ b/modules/api-policy-updateOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,69 @@ + += updateOrganizationAutoPrunePolicy +Updates the auto-prune policy for the organization + +[discrete] +== PUT 
/api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' "/api/v1/organization//autoprunepolicy/" +---- \ No newline at end of file diff --git a/modules/api-policy-updateRepositoryAutoPrunePolicy.adoc b/modules/api-policy-updateRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..b50a407f2 --- /dev/null +++ b/modules/api-policy-updateRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,73 @@ + += 
updateRepositoryAutoPrunePolicy +Updates the auto-prune policy for the repository + +[discrete] +== PUT /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "5", + "tagPattern": "^test.*", + "tagPatternMatches": true + }' \ + "https://quay-server.example.com/api/v1/repository///autoprunepolicy/" +---- \ No newline at end of file diff --git a/modules/api-policy-updateUserAutoPrunePolicy.adoc 
b/modules/api-policy-updateUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..41a9622e4 --- /dev/null +++ b/modules/api-policy-updateUserAutoPrunePolicy.adoc @@ -0,0 +1,70 @@ + += updateUserAutoPrunePolicy +Updates the auto-prune policy for the currently logged in user + +[discrete] +== PUT /api/v1/user/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "10", + "tagPattern": ".*-old", + "tagPatternMatches": true + }' +---- \ No newline at end of file diff --git a/modules/api-policy.adoc b/modules/api-policy.adoc new file mode 100644 index 000000000..5b2896375 --- /dev/null +++ b/modules/api-policy.adoc @@ -0,0 +1,4 @@ + += policy + + diff --git a/modules/api-prototype-createOrganizationPrototypePermission.adoc b/modules/api-prototype-createOrganizationPrototypePermission.adoc index 6f27aecf9..d482be834 100644 --- a/modules/api-prototype-createOrganizationPrototypePermission.adoc +++ b/modules/api-prototype-createOrganizationPrototypePermission.adoc @@ -30,11 +30,11 @@ Description of a new prototype |=== |Name|Description|Schema |**role** + -_optional_|Role that should be applied to the delegate|string +_required_|Role that should be applied to the delegate|string |**activating_user** + _optional_|Repository creating user to whom the rule should apply|object |**delegate** + -_optional_|Information about the user or team to which the rule grants access|object +_required_|Information about the user or team to which the rule grants access|object |=== @@ -50,3 +50,20 @@ _optional_|Information about the user or team 
to which the rule grants access|ob |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" --data '{ + "role": "", + "delegate": { + "name": "", + "kind": "user" + }, + "activating_user": { + "name": "" + } + }' https:///api/v1/organization//prototypes +---- \ No newline at end of file diff --git a/modules/api-prototype-deleteOrganizationPrototypePermission.adoc b/modules/api-prototype-deleteOrganizationPrototypePermission.adoc index eba52ea75..73a0ebc3b 100644 --- a/modules/api-prototype-deleteOrganizationPrototypePermission.adoc +++ b/modules/api-prototype-deleteOrganizationPrototypePermission.adoc @@ -16,10 +16,10 @@ Delete an existing permission prototype. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**prototypeid** + _required_|The ID of the prototype|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -35,3 +35,13 @@ _required_|The ID of the prototype|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes/ +---- \ No newline at end of file diff --git a/modules/api-prototype-getOrganizationPrototypePermissions.adoc b/modules/api-prototype-getOrganizationPrototypePermissions.adoc index f6b4a5b84..b1cd84dde 100644 --- a/modules/api-prototype-getOrganizationPrototypePermissions.adoc +++ b/modules/api-prototype-getOrganizationPrototypePermissions.adoc @@ -33,3 +33,14 @@ _required_|The name of the organization|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + 
+[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes +---- \ No newline at end of file diff --git a/modules/api-prototype-updateOrganizationPrototypePermission.adoc b/modules/api-prototype-updateOrganizationPrototypePermission.adoc index 53507c52c..66823707f 100644 --- a/modules/api-prototype-updateOrganizationPrototypePermission.adoc +++ b/modules/api-prototype-updateOrganizationPrototypePermission.adoc @@ -16,10 +16,10 @@ Update the role of an existing permission prototype. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**prototypeid** + _required_|The ID of the prototype|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -48,3 +48,16 @@ _optional_|Role that should be applied to the permission|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "role": "write" + }' \ + https:///api/v1/organization//prototypes/ +---- diff --git a/modules/api-referrers-getReferrers.adoc b/modules/api-referrers-getReferrers.adoc new file mode 100644 index 000000000..657d07ebc --- /dev/null +++ b/modules/api-referrers-getReferrers.adoc @@ -0,0 +1,23 @@ + += getReferrers +List v2 API referrers of an image digest. + +[discrete] +== GET /v2/{organization_name}/{repository_name}/referrers/{digest} + +[discrete] +== Request body schema (application/json) + +Referrers of an image digest. + +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**referrers** + +_required_| Looks up the OCI referrers of a manifest under a repository.|string +|**manifest_digest** + +_required_|The digest of the manifest|string +|=== \ No newline at end of file diff --git a/modules/api-referrers.adoc b/modules/api-referrers.adoc new file mode 100644 index 000000000..b745bcddb --- /dev/null +++ b/modules/api-referrers.adoc @@ -0,0 +1,3 @@ + += referrers +List v2 API referrers \ No newline at end of file diff --git a/modules/api-repository-changeRepoState.adoc b/modules/api-repository-changeRepoState.adoc index 303e97130..fbe6f32f2 100644 --- a/modules/api-repository-changeRepoState.adoc +++ b/modules/api-repository-changeRepoState.adoc @@ -30,7 +30,7 @@ Change the state of the repository. |=== |Name|Description|Schema |**state** + -_optional_|Determines whether pushes are allowed.|string +_required_|Determines whether pushes are allowed.|string |=== @@ -46,3 +46,7 @@ _optional_|Determines whether pushes are allowed.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + diff --git a/modules/api-repository-changeRepoVisibility.adoc b/modules/api-repository-changeRepoVisibility.adoc index 56b01a668..4611ebffd 100644 --- a/modules/api-repository-changeRepoVisibility.adoc +++ b/modules/api-repository-changeRepoVisibility.adoc @@ -30,7 +30,7 @@ Change the visibility for the repository. 
|=== |Name|Description|Schema |**visibility** + -_optional_|Visibility which the repository will start with|string +_required_|Visibility which the repository will start with|string |=== @@ -46,3 +46,17 @@ _optional_|Visibility which the repository will start with|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example Command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository///changevisibility" +---- \ No newline at end of file diff --git a/modules/api-repository-createRepo.adoc b/modules/api-repository-createRepo.adoc index 593dbfa9d..a4915773f 100644 --- a/modules/api-repository-createRepo.adoc +++ b/modules/api-repository-createRepo.adoc @@ -20,13 +20,13 @@ Description of a new repository |=== |Name|Description|Schema |**repository** + -_optional_|Repository name|string +_required_|Repository name|string |**visibility** + -_optional_|Visibility which the repository will start with|string +_required_|Visibility which the repository will start with|string |**namespace** + _optional_|Namespace in which the repository should be created. If omitted, the username of the caller is used|string |**description** + -_optional_|Markdown encoded description for the repository|string +_required_|Markdown encoded description for the repository|string |**repo_kind** + _optional_|The kind of repository| |=== @@ -44,3 +44,19 @@ _optional_|The kind of repository| |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "repository": "", + "visibility": "", + "description": "." 
+ }' \ + "https://quay-server.example.com/api/v1/repository" +---- \ No newline at end of file diff --git a/modules/api-repository-deleteRepository.adoc b/modules/api-repository-deleteRepository.adoc index ed0996edb..55a682a1a 100644 --- a/modules/api-repository-deleteRepository.adoc +++ b/modules/api-repository-deleteRepository.adoc @@ -33,3 +33,11 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- \ No newline at end of file diff --git a/modules/api-repository-getRepo.adoc b/modules/api-repository-getRepo.adoc index c4ab513ef..e38283cb4 100644 --- a/modules/api-repository-getRepo.adoc +++ b/modules/api-repository-getRepo.adoc @@ -46,3 +46,11 @@ _optional_|Whether to include action statistics|boolean |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- \ No newline at end of file diff --git a/modules/api-repository-listRepos.adoc b/modules/api-repository-listRepos.adoc index 689365415..6b27cc238 100644 --- a/modules/api-repository-listRepos.adoc +++ b/modules/api-repository-listRepos.adoc @@ -26,11 +26,11 @@ _optional_|Whether to include the repository's popularity metric.|boolean |query|**last_modified** + _optional_|Whether to include when the repository was last modified.|boolean |query|**public** + -_optional_|Adds any repositories visible to the user by virtue of being public|boolean +_required_|Adds any repositories visible to the user by virtue of being public|boolean |query|**starred** + -_optional_|Filters the repositories returned to those starred by the user|boolean +_required_|Filters the repositories returned to those starred by the 
user|boolean |query|**namespace** + -_optional_|Filters the repositories returned to this namespace|string +_required_|Filters the repositories returned to this namespace|string |=== @@ -46,3 +46,13 @@ _optional_|Filters the repositories returned to this namespace|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository?public=true&starred=false&namespace=" +---- \ No newline at end of file diff --git a/modules/api-repository-updateRepo.adoc b/modules/api-repository-updateRepo.adoc index 9526da131..3ed1af06b 100644 --- a/modules/api-repository-updateRepo.adoc +++ b/modules/api-repository-updateRepo.adoc @@ -30,7 +30,7 @@ Fields which can be updated in a repository. |=== |Name|Description|Schema |**description** + -_optional_|Markdown encoded description for the repository|string +_required_|Markdown encoded description for the repository|string |=== @@ -46,3 +46,16 @@ _optional_|Markdown encoded description for the repository|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "description": "This is an updated description for the repository." 
+ }' \ + "https://quay-server.example.com/api/v1/repository//" +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-createRepoNotification.adoc b/modules/api-repositorynotification-createRepoNotification.adoc index a9915151a..050a1649b 100644 --- a/modules/api-repositorynotification-createRepoNotification.adoc +++ b/modules/api-repositorynotification-createRepoNotification.adoc @@ -30,13 +30,13 @@ Information for creating a notification on a repository |=== |Name|Description|Schema |**event** + -_optional_|The event on which the notification will respond|string +_required_|The event on which the notification will respond|string |**method** + -_optional_|The method of notification (such as email or web callback)|string +_required_|The method of notification (such as email or web callback)|string |**config** + -_optional_|JSON config information for the specific method of notification|object +_required_|JSON config information for the specific method of notification|object |**eventConfig** + -_optional_|JSON config information for the specific event of notification|object +_required_|JSON config information for the specific event of notification|object |**title** + _optional_|The human-readable title of the notification|string |=== @@ -54,3 +54,23 @@ _optional_|The human-readable title of the notification|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "event": "", + "method": "", + "config": { + "": "" + }, + "eventConfig": { + "": "" + } + }' \ + https:///api/v1/repository///notification/ +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-deleteRepoNotification.adoc b/modules/api-repositorynotification-deleteRepoNotification.adoc index 1d861e5c1..85ec967ab 100644 --- 
a/modules/api-repositorynotification-deleteRepoNotification.adoc +++ b/modules/api-repositorynotification-deleteRepoNotification.adoc @@ -16,10 +16,10 @@ Deletes the specified notification. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**uuid** + -_required_|The UUID of the notification|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string |=== @@ -35,3 +35,12 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository///notification/ +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-getRepoNotification.adoc b/modules/api-repositorynotification-getRepoNotification.adoc index 849354080..9a8ea167c 100644 --- a/modules/api-repositorynotification-getRepoNotification.adoc +++ b/modules/api-repositorynotification-getRepoNotification.adoc @@ -16,10 +16,10 @@ Get information for the specified notification. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**uuid** + -_required_|The UUID of the notification|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string |=== @@ -35,3 +35,12 @@ _required_|The full path of the repository. e.g. 
namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification/ +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-listRepoNotifications.adoc b/modules/api-repositorynotification-listRepoNotifications.adoc index 0b21d4b70..7b9c26085 100644 --- a/modules/api-repositorynotification-listRepoNotifications.adoc +++ b/modules/api-repositorynotification-listRepoNotifications.adoc @@ -33,3 +33,11 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" https:///api/v1/repository///notification +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc b/modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc index ddaf94bb1..96c4da3cb 100644 --- a/modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc +++ b/modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc @@ -16,10 +16,10 @@ Resets repository notification to 0 failures. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**uuid** + -_required_|The UUID of the notification|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string |=== @@ -29,9 +29,19 @@ _required_|The full path of the repository. e.g. 
namespace/name|string [options="header", width=100%, cols=".^2a,.^14a,.^4a"] |=== |HTTP Code|Description|Schema -|201|Successful creation| +|204|Successful creation| |400|Bad Request|<<_apierror,ApiError>> |401|Session required|<<_apierror,ApiError>> |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification/ +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-testRepoNotification.adoc b/modules/api-repositorynotification-testRepoNotification.adoc index c966ecbf2..b03145fae 100644 --- a/modules/api-repositorynotification-testRepoNotification.adoc +++ b/modules/api-repositorynotification-testRepoNotification.adoc @@ -16,10 +16,10 @@ Queues a test notification for this repository. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**uuid** + -_required_|The UUID of the notification|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string |=== @@ -35,3 +35,12 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification//test +---- \ No newline at end of file diff --git a/modules/api-repotoken-changeToken.adoc b/modules/api-repotoken-changeToken.adoc index 5af74bd5c..80fd33677 100644 --- a/modules/api-repotoken-changeToken.adoc +++ b/modules/api-repotoken-changeToken.adoc @@ -16,10 +16,10 @@ Update the permissions for the specified repository token. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**code** + -_required_|The token code|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**code** + +_required_|The token code|string |=== diff --git a/modules/api-repotoken-createToken.adoc b/modules/api-repotoken-createToken.adoc index f4c20fc0a..2997cb0a1 100644 --- a/modules/api-repotoken-createToken.adoc +++ b/modules/api-repotoken-createToken.adoc @@ -30,7 +30,7 @@ Description of a new token. |=== |Name|Description|Schema |**friendlyName** + -_optional_|Friendly name to help identify the token|string +_required_|Friendly name to help identify the token|string |=== diff --git a/modules/api-repotoken-deleteToken.adoc b/modules/api-repotoken-deleteToken.adoc index a9b706df2..de59b8169 100644 --- a/modules/api-repotoken-deleteToken.adoc +++ b/modules/api-repotoken-deleteToken.adoc @@ -16,10 +16,10 @@ Delete the repository token. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**code** + -_required_|The token code|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**code** + +_required_|The token code|string |=== diff --git a/modules/api-repotoken-getTokens.adoc b/modules/api-repotoken-getTokens.adoc index afcfe2d4a..7ab3a6f54 100644 --- a/modules/api-repotoken-getTokens.adoc +++ b/modules/api-repotoken-getTokens.adoc @@ -16,10 +16,10 @@ Fetch the specified repository token information. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**code** + -_required_|The token code|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**code** + +_required_|The token code|string |=== diff --git a/modules/api-robot-createOrgRobot.adoc b/modules/api-robot-createOrgRobot.adoc index 163ba566f..e0a84277a 100644 --- a/modules/api-robot-createOrgRobot.adoc +++ b/modules/api-robot-createOrgRobot.adoc @@ -16,10 +16,10 @@ Create a new robot in the organization. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**robot_shortname** + _required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -44,9 +44,17 @@ _optional_|Optional unstructured metadata for the robot|object [options="header", width=100%, cols=".^2a,.^14a,.^4a"] |=== |HTTP Code|Description|Schema -|200|Successful invocation| +|201|Successful invocation| |400|Bad Request|<<_apierror,ApiError>> |401|Session required|<<_apierror,ApiError>> |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/organization//robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-createOrgRobotFederation.adoc b/modules/api-robot-createOrgRobotFederation.adoc new file mode 100644 index 000000000..4a209df26 --- /dev/null +++ b/modules/api-robot-createOrgRobotFederation.adoc @@ -0,0 +1,42 @@ += createOrgRobotFederation + +Create a federation configuration for the specified organization robot. + +[discrete] +== POST /api/v1/organization/{orgname}/robots/{robot_shortname}/federation + +Create the federation configuration for the specified organization robot. 
+ +**Authorizations: **oauth2_implicit (**user:admin**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|*orgname* + *robot_shortname* +_required_|The name of the organization and the short name for the robot, without any user or organization prefix|string +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful invocation | +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/organization/{orgname}/robots/{robot_shortname}/federation" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" +---- diff --git a/modules/api-robot-createUserRobot.adoc b/modules/api-robot-createUserRobot.adoc index 38d7d016b..bdd603fce 100644 --- a/modules/api-robot-createUserRobot.adoc +++ b/modules/api-robot-createUserRobot.adoc @@ -48,3 +48,11 @@ _optional_|Optional unstructured metadata for the robot|object |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/user/robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-deleteOrgRobot.adoc b/modules/api-robot-deleteOrgRobot.adoc index 919e9f1c3..36c01dffa 100644 --- a/modules/api-robot-deleteOrgRobot.adoc +++ b/modules/api-robot-deleteOrgRobot.adoc @@ -16,10 +16,10 @@ Delete an existing organization robot. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**robot_shortname** + _required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -35,3 +35,13 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-deleteOrgRobotFederation.adoc b/modules/api-robot-deleteOrgRobotFederation.adoc new file mode 100644 index 000000000..5d0f48dfc --- /dev/null +++ b/modules/api-robot-deleteOrgRobotFederation.adoc @@ -0,0 +1,31 @@ += deleteOrgRobotFederation + +Delete a federation configuration for the specified organization robot. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/robots/{robot_shortname}/federation + +**Authorizations: **oauth2_implicit (org) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|orgname + +_required_|The name of the organization and the short name for the robot, without any user or organization prefix|string +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== \ No newline at end of file diff --git a/modules/api-robot-deleteUserRobot.adoc b/modules/api-robot-deleteUserRobot.adoc index 9d3226d48..43d62542a 100644 --- a/modules/api-robot-deleteUserRobot.adoc +++ b/modules/api-robot-deleteUserRobot.adoc @@ -33,3 +33,12 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-getOrgRobot.adoc b/modules/api-robot-getOrgRobot.adoc index cf9d34f78..83470ac3a 100644 --- a/modules/api-robot-getOrgRobot.adoc +++ b/modules/api-robot-getOrgRobot.adoc @@ -16,10 +16,10 @@ Returns the organization's robot with the specified name. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**robot_shortname** + _required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -35,3 +35,13 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-getOrgRobotFederation.adoc b/modules/api-robot-getOrgRobotFederation.adoc new file mode 100644 index 000000000..5b5bf1575 --- /dev/null +++ b/modules/api-robot-getOrgRobotFederation.adoc @@ -0,0 +1,72 @@ +//// += getOrgRobotFederation + +Manage federation configuration for a robot account within an organization. + +[discrete] +== GET /api/v1/organization/{orgname}/robots/{robot_shortname}/federation + +Retrieve the federation configuration for the specified organization robot. 
+ +**Authorizations: **oauth2_implicit (**user:admin**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|*orgname* + *robot_shortname* +_required_|The name of the organization and the short name for the robot, without any user or organization prefix|string +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== +//// + += Auth Federated Robot Token +Return an expiring robot token using the robot identity federation mechanism. + +[discrete] +== GET oauth2/federation/robot/token + +**Authorizations:** oauth2_implicit (**robot:auth**) + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful authentication and token generation|{ "token": "string" } +|401|Unauthorized: missing or invalid authentication|{ "error": "string" } +|=== + +[discrete] +== Request Body + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|body|**auth_result** + +_required_|The result of the authentication process, containing information about the robot identity.|{ "missing": "boolean", "error_message": "string", "context": { "robot": "RobotObject" } } +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/oauth2/federation/robot/token" \ + -H "Authorization: Bearer " +---- diff --git a/modules/api-robot-getOrgRobotPermissions.adoc b/modules/api-robot-getOrgRobotPermissions.adoc index cba136b99..e2c965363 100644 --- a/modules/api-robot-getOrgRobotPermissions.adoc +++ b/modules/api-robot-getOrgRobotPermissions.adoc @@ -16,10 +16,10 @@ Returns 
the list of repository permissions for the org's robot. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**robot_shortname** + _required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -35,3 +35,13 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots//permissions" +---- \ No newline at end of file diff --git a/modules/api-robot-getOrgRobots.adoc b/modules/api-robot-getOrgRobots.adoc index 596215c44..09b2dfab0 100644 --- a/modules/api-robot-getOrgRobots.adoc +++ b/modules/api-robot-getOrgRobots.adoc @@ -32,7 +32,7 @@ _optional_|If specified, the number of robots to return.|integer |query|**token** + _optional_|If false, the robot's token is not returned.|boolean |query|**permissions** + -_optional_|Whether to include repostories and teams in which the robots have permission.|boolean +_optional_|Whether to include repositories and teams in which the robots have permission.|boolean |=== @@ -48,3 +48,11 @@ _optional_|Whether to include repostories and teams in which the robots have per |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/organization//robots" +---- \ No newline at end of file diff --git a/modules/api-robot-getUserRobot.adoc b/modules/api-robot-getUserRobot.adoc index 89a204aa6..5266691f1 100644 --- a/modules/api-robot-getUserRobot.adoc +++ b/modules/api-robot-getUserRobot.adoc @@ 
-33,3 +33,13 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-getUserRobotPermissions.adoc b/modules/api-robot-getUserRobotPermissions.adoc index 3d4312459..9e4ee24cf 100644 --- a/modules/api-robot-getUserRobotPermissions.adoc +++ b/modules/api-robot-getUserRobotPermissions.adoc @@ -33,3 +33,13 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/user/robots//permissions" +---- \ No newline at end of file diff --git a/modules/api-robot-getUserRobots.adoc b/modules/api-robot-getUserRobots.adoc index 00d61eee5..73adeb051 100644 --- a/modules/api-robot-getUserRobots.adoc +++ b/modules/api-robot-getUserRobots.adoc @@ -38,3 +38,12 @@ _optional_|Whether to include repositories and teams in which the robots have pe |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/robots?limit=10&token=false&permissions=true" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-robot-regenerateOrgRobotToken.adoc b/modules/api-robot-regenerateOrgRobotToken.adoc index be8dd8fbd..b9a91f01c 100644 --- a/modules/api-robot-regenerateOrgRobotToken.adoc +++ b/modules/api-robot-regenerateOrgRobotToken.adoc @@ -16,10 +16,10 @@ Regenerates the token for an organization robot. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**robot_shortname** + _required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -35,3 +35,13 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots//regenerate" +---- \ No newline at end of file diff --git a/modules/api-robot-regenerateUserRobotToken.adoc b/modules/api-robot-regenerateUserRobotToken.adoc index f519d55ef..49cb140dc 100644 --- a/modules/api-robot-regenerateUserRobotToken.adoc +++ b/modules/api-robot-regenerateUserRobotToken.adoc @@ -33,3 +33,12 @@ _required_|The short name for the robot, without any user or organization prefix |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots//regenerate" +---- \ No newline at end of file diff --git a/modules/api-search-conductRepoSearch.adoc b/modules/api-search-conductRepoSearch.adoc index 82d25651d..cf502e94c 100644 --- a/modules/api-search-conductRepoSearch.adoc +++ b/modules/api-search-conductRepoSearch.adoc @@ -36,3 +36,11 @@ _optional_|The search query.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/repositories?query=&page=1&includeUsage=true" \ + -H "Authorization: Bearer " +---- diff --git a/modules/api-search-conductSearch.adoc 
b/modules/api-search-conductSearch.adoc index df40c5730..d5a01015f 100644 --- a/modules/api-search-conductSearch.adoc +++ b/modules/api-search-conductSearch.adoc @@ -34,3 +34,11 @@ _optional_|The search query.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/all?query=" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-search-getMatchingEntities.adoc b/modules/api-search-getMatchingEntities.adoc index 1f9288876..d0921434c 100644 --- a/modules/api-search-getMatchingEntities.adoc +++ b/modules/api-search-getMatchingEntities.adoc @@ -46,3 +46,11 @@ _optional_|Namespace to use when querying for org entities.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/entities/?includeOrgs=&includeTeams=&namespace=" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-secscan-getRepoManifestSecurity.adoc b/modules/api-secscan-getRepoManifestSecurity.adoc index 61a417be3..03b96004b 100644 --- a/modules/api-secscan-getRepoManifestSecurity.adoc +++ b/modules/api-secscan-getRepoManifestSecurity.adoc @@ -16,10 +16,10 @@ [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**manifestref** + -_required_|The digest of the manifest|string |path|**repository** + _required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string |=== @@ -46,3 +46,13 @@ _optional_|Include vulnerabilities informations|boolean |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https://quay-server.example.com/api/v1/repository///manifest//security?vulnerabilities=" +---- \ No newline at end of file diff --git a/modules/api-superuser-approveServiceKey.adoc b/modules/api-superuser-approveServiceKey.adoc index 93fc7b776..f1378e460 100644 --- a/modules/api-superuser-approveServiceKey.adoc +++ b/modules/api-superuser-approveServiceKey.adoc @@ -46,3 +46,17 @@ _optional_|Optional approval notes|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "notes": "" + }' \ + "https:///api/v1/superuser/approvedkeys/" +---- \ No newline at end of file diff --git a/modules/api-superuser-changeOrganization.adoc b/modules/api-superuser-changeOrganization.adoc index b70aacd67..828cc8b20 100644 --- a/modules/api-superuser-changeOrganization.adoc +++ b/modules/api-superuser-changeOrganization.adoc @@ -52,3 +52,20 @@ _optional_|The number of seconds for tag expiration|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "email": "", + "invoice_email": , + "invoice_email_address": "", + "tag_expiration_s": + }' \ + "https:///api/v1/superuser/organizations/" +---- \ No newline at end of file diff --git 
a/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc b/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc index 5b56fab01..e11cb8ec5 100644 --- a/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc +++ b/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc @@ -48,3 +48,16 @@ _optional_|Number of bytes the organization is allowed|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-changeUserQuotaSuperUser.adoc b/modules/api-superuser-changeUserQuotaSuperUser.adoc index d1a856b2d..0517f8a3d 100644 --- a/modules/api-superuser-changeUserQuotaSuperUser.adoc +++ b/modules/api-superuser-changeUserQuotaSuperUser.adoc @@ -48,3 +48,16 @@ _optional_|Number of bytes the organization is allowed|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-createInstallUser.adoc b/modules/api-superuser-createInstallUser.adoc index 68f3aeec2..b2100618e 100644 --- a/modules/api-superuser-createInstallUser.adoc +++ b/modules/api-superuser-createInstallUser.adoc @@ -20,7 +20,7 @@ Data for creating a user |=== |Name|Description|Schema |**username** + -_optional_|The username of the user being created|string +_required_|The username of the user being created|string |**email** + _optional_|The email address of the user being created|string |=== @@ 
-38,3 +38,14 @@ _optional_|The email address of the user being created|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "username": "newuser", + "email": "newuser@example.com" +}' "https:///api/v1/superuser/users/" +---- \ No newline at end of file diff --git a/modules/api-superuser-createOrganizationQuotaSuperUser.adoc b/modules/api-superuser-createOrganizationQuotaSuperUser.adoc index f9ba5a5cc..861aff96a 100644 --- a/modules/api-superuser-createOrganizationQuotaSuperUser.adoc +++ b/modules/api-superuser-createOrganizationQuotaSuperUser.adoc @@ -46,3 +46,16 @@ _optional_|Number of bytes the organization is allowed|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-createServiceKey.adoc b/modules/api-superuser-createServiceKey.adoc index b7399c426..96fe1b1cc 100644 --- a/modules/api-superuser-createServiceKey.adoc +++ b/modules/api-superuser-createServiceKey.adoc @@ -20,7 +20,7 @@ Description of creation of a service key |=== |Name|Description|Schema |**service** + -_optional_|The service authenticating with this key|string +_required_|The service authenticating with this key|string |**name** + _optional_|The friendly name of a service key|string |**metadata** + @@ -28,7 +28,7 @@ _optional_|The key/value pairs of this key's metadata|object |**notes** + _optional_|If specified, the extra notes for the key|string |**expiration** + -_optional_|The expiration date as a unix timestamp| +_required_|The expiration date as 
a unix timestamp| |=== @@ -44,3 +44,18 @@ _optional_|The expiration date as a unix timestamp| |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "service": "", + "expiration": + }' \ + "/api/v1/superuser/keys" +---- \ No newline at end of file diff --git a/modules/api-superuser-createUserQuotaSuperUser.adoc b/modules/api-superuser-createUserQuotaSuperUser.adoc index baf4cd771..cf7f93ffc 100644 --- a/modules/api-superuser-createUserQuotaSuperUser.adoc +++ b/modules/api-superuser-createUserQuotaSuperUser.adoc @@ -30,7 +30,7 @@ Description of a new organization quota |=== |Name|Description|Schema |**limit_bytes** + -_optional_|Number of bytes the organization is allowed|integer +_required_|Number of bytes the organization is allowed|integer |=== @@ -46,3 +46,16 @@ _optional_|Number of bytes the organization is allowed|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240 + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteInstallUser.adoc b/modules/api-superuser-deleteInstallUser.adoc new file mode 100644 index 000000000..15774f8e3 --- /dev/null +++ b/modules/api-superuser-deleteInstallUser.adoc @@ -0,0 +1,46 @@ + += deleteInstallUser +Deletes a user. 
+ +[discrete] +== DELETE /api/v1/superuser/users/{username} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Request body schema (application/json) + +Data for deleting a user + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**username** + +_required_|The username of the user being deleted|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "https:///api/v1/superuser/users/{username}" +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteOrganization.adoc b/modules/api-superuser-deleteOrganization.adoc index 0f2bf137c..4d0604844 100644 --- a/modules/api-superuser-deleteOrganization.adoc +++ b/modules/api-superuser-deleteOrganization.adoc @@ -33,3 +33,13 @@ _required_|The name of the organizaton being managed|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/organizations/" +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc b/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc index 6792048a4..7eba4b703 100644 --- a/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc +++ b/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc @@ -35,3 +35,12 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ 
curl -X DELETE "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteServiceKey.adoc b/modules/api-superuser-deleteServiceKey.adoc index f63cc7294..e3a3ef0ac 100644 --- a/modules/api-superuser-deleteServiceKey.adoc +++ b/modules/api-superuser-deleteServiceKey.adoc @@ -33,3 +33,13 @@ _required_|The unique identifier for a service key|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteUserQuotaSuperUser.adoc b/modules/api-superuser-deleteUserQuotaSuperUser.adoc index fbec9ee06..00317eb84 100644 --- a/modules/api-superuser-deleteUserQuotaSuperUser.adoc +++ b/modules/api-superuser-deleteUserQuotaSuperUser.adoc @@ -35,3 +35,12 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-getRegistrySize.adoc b/modules/api-superuser-getRegistrySize.adoc new file mode 100644 index 000000000..966022e3c --- /dev/null +++ b/modules/api-superuser-getRegistrySize.adoc @@ -0,0 +1,56 @@ + += getRegistrySize + + +[discrete] +== GET /api/v1/superuser/registrysize/ + +**Authorizations: **oauth2_implicit (**super:user**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + +Description of a image registry size + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== 
+|Name|Description|Schema +|**size_bytes** + +_optional_|Number of bytes the organization is allowed|integer + +|*last_ran* | |integer + +|*queued* | |boolean + +|*running* | |boolean +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|CREATED| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/registrysize/" +---- \ No newline at end of file diff --git a/modules/api-superuser-getRepoBuildLogsSuperUser.adoc b/modules/api-superuser-getRepoBuildLogsSuperUser.adoc index bf2574c93..1494a9613 100644 --- a/modules/api-superuser-getRepoBuildLogsSuperUser.adoc +++ b/modules/api-superuser-getRepoBuildLogsSuperUser.adoc @@ -33,3 +33,12 @@ _required_|The UUID of the build|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//logs" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-getRepoBuildStatusSuperUser.adoc b/modules/api-superuser-getRepoBuildStatusSuperUser.adoc index def8133d5..5202c4238 100644 --- a/modules/api-superuser-getRepoBuildStatusSuperUser.adoc +++ b/modules/api-superuser-getRepoBuildStatusSuperUser.adoc @@ -33,3 +33,12 @@ _required_|The UUID of the build|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//status" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git
a/modules/api-superuser-getRepoBuildSuperUser.adoc b/modules/api-superuser-getRepoBuildSuperUser.adoc index 7a4a13f4f..c5e1a1be0 100644 --- a/modules/api-superuser-getRepoBuildSuperUser.adoc +++ b/modules/api-superuser-getRepoBuildSuperUser.adoc @@ -33,3 +33,12 @@ _required_|The UUID of the build|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//build" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-getServiceKey.adoc b/modules/api-superuser-getServiceKey.adoc index 1e9f30e1c..496250c9f 100644 --- a/modules/api-superuser-getServiceKey.adoc +++ b/modules/api-superuser-getServiceKey.adoc @@ -33,3 +33,13 @@ _required_|The unique identifier for a service key|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- \ No newline at end of file diff --git a/modules/api-superuser-listAllLogs.adoc b/modules/api-superuser-listAllLogs.adoc index c6d184f83..498022c4c 100644 --- a/modules/api-superuser-listAllLogs.adoc +++ b/modules/api-superuser-listAllLogs.adoc @@ -40,3 +40,13 @@ _optional_|Earliest time from which to get logs (%m/%d/%Y %Z)|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/logs?starttime=&endtime=&page=&next_page=" +---- \ No newline at end of file diff --git a/modules/api-superuser-listAllOrganizations.adoc b/modules/api-superuser-listAllOrganizations.adoc new file mode 100644 index 000000000..ebebfb951 --- /dev/null +++ 
b/modules/api-superuser-listAllOrganizations.adoc @@ -0,0 +1,43 @@ + += listAllOrganizations +List the organizations for the current system. + +[discrete] +== GET /api/v1/superuser/organizations + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**name** + +_required_|The name of the organization being managed|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/organizations/" +---- \ No newline at end of file diff --git a/modules/api-superuser-listAllUsers.adoc b/modules/api-superuser-listAllUsers.adoc index 0dae54351..ff0086bf3 100644 --- a/modules/api-superuser-listAllUsers.adoc +++ b/modules/api-superuser-listAllUsers.adoc @@ -17,6 +17,10 @@ Returns a list of all users in the system. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|query|**limit** + +_optional_|Limit to the number of results to return per page.
Max 100.|integer |query|**disabled** + _optional_|If false, only enabled users will be returned.|boolean |=== @@ -34,3 +38,11 @@ _optional_|If false, only enabled users will be returned.|boolean |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/users/" +---- \ No newline at end of file diff --git a/modules/api-superuser-listOrganizationQuotaSuperUser.adoc b/modules/api-superuser-listOrganizationQuotaSuperUser.adoc index 3fd2d5722..835b94e04 100644 --- a/modules/api-superuser-listOrganizationQuotaSuperUser.adoc +++ b/modules/api-superuser-listOrganizationQuotaSuperUser.adoc @@ -33,3 +33,12 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-listServiceKeys.adoc b/modules/api-superuser-listServiceKeys.adoc index 6f0c16e8a..e442bde9f 100644 --- a/modules/api-superuser-listServiceKeys.adoc +++ b/modules/api-superuser-listServiceKeys.adoc @@ -23,3 +23,13 @@ |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys" +---- \ No newline at end of file diff --git a/modules/api-superuser-listUserQuotaSuperUser.adoc b/modules/api-superuser-listUserQuotaSuperUser.adoc index 14670c1a2..5edbee31d 100644 --- a/modules/api-superuser-listUserQuotaSuperUser.adoc +++ b/modules/api-superuser-listUserQuotaSuperUser.adoc @@ -33,3 +33,12 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== 
+ +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-postRegistrySize.adoc b/modules/api-superuser-postRegistrySize.adoc new file mode 100644 index 000000000..c37260c47 --- /dev/null +++ b/modules/api-superuser-postRegistrySize.adoc @@ -0,0 +1,64 @@ + += postRegistrySize + + +[discrete] +== POST /api/v1/superuser/registrysize/ + +**Authorizations: **oauth2_implicit (**super:user**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a image registry size + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema + +|*last_ran* | |integer + +|*queued* | |boolean + +|*running* | |boolean +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|CREATED| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/registrysize/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "", + "last_ran": 1700000000, + "queued": true, + "running": false + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-updateServiceKey.adoc b/modules/api-superuser-updateServiceKey.adoc index a153ae6e1..691667dd0 100644 --- a/modules/api-superuser-updateServiceKey.adoc +++ b/modules/api-superuser-updateServiceKey.adoc @@ -50,3 +50,19 @@ _optional_|The expiration date as a unix 
timestamp| |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "metadata": {"": ""}, + "expiration": + }' \ + "https:///api/v1/superuser/keys/" +---- \ No newline at end of file diff --git a/modules/api-tag-changeTag.adoc b/modules/api-tag-changeTag.adoc index b92bbf5ad..ecb9bb2b9 100644 --- a/modules/api-tag-changeTag.adoc +++ b/modules/api-tag-changeTag.adoc @@ -16,10 +16,10 @@ Change which image a tag points to or create a new tag. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**tag** + -_required_|The name of the tag|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**tag** + +_required_|The name of the tag|string |=== @@ -44,9 +44,23 @@ _optional_|(If specified) The expiration for the image| [options="header", width=100%, cols=".^2a,.^14a,.^4a"] |=== |HTTP Code|Description|Schema -|200|Successful invocation| +|201|Successful invocation| |400|Bad Request|<<_apierror,ApiError>> |401|Session required|<<_apierror,ApiError>> |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": "" + }' \ + https:///api/v1/repository///tag/ +---- \ No newline at end of file diff --git a/modules/api-tag-deleteFullTag.adoc b/modules/api-tag-deleteFullTag.adoc index a83b94fec..a4655f51e 100644 --- a/modules/api-tag-deleteFullTag.adoc +++ b/modules/api-tag-deleteFullTag.adoc @@ -16,10 +16,10 @@ Delete the specified repository tag. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**tag** + -_required_|The name of the tag|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**tag** + +_required_|The name of the tag|string |=== @@ -35,3 +35,12 @@ _required_|The full path of the repository. e.g. namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-tag-listRepoTags.adoc b/modules/api-tag-listRepoTags.adoc index 78e4fc881..f8f253d8a 100644 --- a/modules/api-tag-listRepoTags.adoc +++ b/modules/api-tag-listRepoTags.adoc @@ -33,6 +33,8 @@ _optional_|Filter to only active tags.|boolean _optional_|Page index for the results. Default 1.|integer |query|**limit** + _optional_|Limit to the number of results to return per page. 
Max 100.|integer +|query|**filter_tag_name** + +_optional_|Syntax: <op>:<name> Filters the tag names based on the operation.<op> can be 'like' or 'eq'.|string |query|**specificTag** + _optional_|Filters the tags to the specific tag.|string |=== @@ -50,3 +52,14 @@ _optional_|Filters the tags to the specific tag.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag/ +---- \ No newline at end of file diff --git a/modules/api-tag-removeTagFromTimemachine.adoc b/modules/api-tag-removeTagFromTimemachine.adoc new file mode 100644 index 000000000..b3cb4c216 --- /dev/null +++ b/modules/api-tag-removeTagFromTimemachine.adoc @@ -0,0 +1,54 @@ + += removeTagFromTimemachine +Updates any expired tags with the matching name and manifest with an expiry outside the time machine window + +[discrete] +== POST /api/v1/repository/{repository}/tag/{tag}/expire + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**tag** + +_required_|The name of the tag|string +|=== + + +[discrete] +== Request body schema (application/json) + +Removes tag from the time machine window + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**manifest_digest** + +_optional_|Required if is_alive set to false. If specified, the manifest digest that should be used. Ignored when setting alive to true.|string +|**include_submanifests** + +_optional_|If set to true, expire the sub-manifests as well|boolean +|**is_alive** + +_optional_|If true, set the expiry of the matching alive tag outside the time machine window. 
If false set the expiry of any expired tags with the same tag and manifest outside the time machine window.|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-tag-restoreTag.adoc b/modules/api-tag-restoreTag.adoc index e7667da61..48d281ae0 100644 --- a/modules/api-tag-restoreTag.adoc +++ b/modules/api-tag-restoreTag.adoc @@ -16,10 +16,10 @@ Restores a repository tag back to a previous image in the repository. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**tag** + -_required_|The name of the tag|string |path|**repository** + _required_|The full path of the repository. e.g. namespace/name|string +|path|**tag** + +_required_|The name of the tag|string |=== @@ -32,7 +32,7 @@ Restores a tag to a specific image |=== |Name|Description|Schema |**manifest_digest** + -_optional_|If specified, the manifest digest that should be used|string +_required_|If specified, the manifest digest that should be used|string |=== @@ -48,3 +48,17 @@ _optional_|If specified, the manifest digest that should be used|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": + }' \ + quay-server.example.com/api/v1/repository/quayadmin/busybox/tag/test/restore +---- \ No newline at end of file diff --git a/modules/api-team-deleteOrganizationTeam.adoc b/modules/api-team-deleteOrganizationTeam.adoc index 2b31d304e..ac787ee2e 100644 --- a/modules/api-team-deleteOrganizationTeam.adoc +++ 
b/modules/api-team-deleteOrganizationTeam.adoc @@ -16,10 +16,10 @@ Delete the specified team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -35,3 +35,12 @@ _required_|The name of the team|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team/" +---- \ No newline at end of file diff --git a/modules/api-team-deleteOrganizationTeamMember.adoc b/modules/api-team-deleteOrganizationTeamMember.adoc index e9cbfbaab..4edf232b4 100644 --- a/modules/api-team-deleteOrganizationTeamMember.adoc +++ b/modules/api-team-deleteOrganizationTeamMember.adoc @@ -18,12 +18,12 @@ Delete a member of a team. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string |path|**membername** + _required_|The username of the team member|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -39,3 +39,13 @@ _required_|The username of the team member|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- \ No newline at end of file diff --git a/modules/api-team-deleteTeamMemberEmailInvite.adoc b/modules/api-team-deleteTeamMemberEmailInvite.adoc index af324374e..7999012e5 100644 --- a/modules/api-team-deleteTeamMemberEmailInvite.adoc +++ b/modules/api-team-deleteTeamMemberEmailInvite.adoc @@ -16,11 +16,11 @@ Delete an invite of an email address to join a team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + +|path|**email** + _required_||string |path|**teamname** + _required_||string -|path|**email** + +|path|**orgname** + _required_||string |=== @@ -37,3 +37,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- \ No newline at end of file diff --git a/modules/api-team-getOrganizationTeamMembers.adoc b/modules/api-team-getOrganizationTeamMembers.adoc index 14f5e8e64..f7c74a055 100644 --- a/modules/api-team-getOrganizationTeamMembers.adoc +++ b/modules/api-team-getOrganizationTeamMembers.adoc @@ -16,10 +16,10 @@ Retrieve the list of members for the specified team.
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -46,3 +46,13 @@ _optional_|Whether to include pending members|boolean |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members" +---- \ No newline at end of file diff --git a/modules/api-team-getOrganizationTeamPermissions.adoc b/modules/api-team-getOrganizationTeamPermissions.adoc index fa782e299..03349ed04 100644 --- a/modules/api-team-getOrganizationTeamPermissions.adoc +++ b/modules/api-team-getOrganizationTeamPermissions.adoc @@ -14,10 +14,10 @@ Returns the list of repository permissions for the org's team. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -33,3 +33,13 @@ _required_|The name of the team|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//permissions" +---- \ No newline at end of file diff --git a/modules/api-team-inviteTeamMemberEmail.adoc b/modules/api-team-inviteTeamMemberEmail.adoc index c0f73c997..1bb1ae03a 100644 --- a/modules/api-team-inviteTeamMemberEmail.adoc +++ b/modules/api-team-inviteTeamMemberEmail.adoc @@ -16,11 +16,11 @@ Invites an email address to an existing team. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + +|path|**email** + _required_||string |path|**teamname** + _required_||string -|path|**email** + +|path|**orgname** + _required_||string |=== @@ -37,3 +37,13 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- \ No newline at end of file diff --git a/modules/api-team-updateOrganizationTeam.adoc b/modules/api-team-updateOrganizationTeam.adoc index cd6b715c3..0c5b7255a 100644 --- a/modules/api-team-updateOrganizationTeam.adoc +++ b/modules/api-team-updateOrganizationTeam.adoc @@ -2,6 +2,11 @@ = updateOrganizationTeam Update the org-wide permission for the specified team. +[NOTE] +==== +This API is also used to create a team. +==== + [discrete] == PUT /api/v1/organization/{orgname}/team/{teamname} @@ -16,10 +21,10 @@ Update the org-wide permission for the specified team. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -32,7 +37,7 @@ Description of a team |=== |Name|Description|Schema |**role** + -_optional_|Org wide permissions that should apply to the team|string +_required_|Org wide permissions that should apply to the team|string |**description** + _optional_|Markdown description for the team|string |=== @@ -50,3 +55,11 @@ _optional_|Markdown description for the team|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -k -X PUT -H 'Accept: application/json' -H 'Content-Type: application/json' -H "Authorization: Bearer " --data '{"role": "creator"}' https:///api/v1/organization//team/ +---- \ No newline at end of file diff --git a/modules/api-team-updateOrganizationTeamMember.adoc b/modules/api-team-updateOrganizationTeamMember.adoc index e77efc676..956e06874 100644 --- a/modules/api-team-updateOrganizationTeamMember.adoc +++ b/modules/api-team-updateOrganizationTeamMember.adoc @@ -16,12 +16,12 @@ Adds or invites a member to an existing team. 
[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**orgname** + -_required_|The name of the organization|string |path|**teamname** + _required_|The name of the team|string |path|**membername** + _required_|The username of the team member|string +|path|**orgname** + +_required_|The name of the organization|string |=== @@ -37,3 +37,13 @@ _required_|The username of the team member|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- \ No newline at end of file diff --git a/modules/api-trigger-activateBuildTrigger.adoc b/modules/api-trigger-activateBuildTrigger.adoc index 90beb7e33..795a22e81 100644 --- a/modules/api-trigger-activateBuildTrigger.adoc +++ b/modules/api-trigger-activateBuildTrigger.adoc @@ -16,10 +16,10 @@ Activate the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**trigger_uuid** + _required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string |=== @@ -32,7 +32,7 @@ _required_|The UUID of the build trigger|string |=== |Name|Description|Schema |**config** + -_optional_|Arbitrary json.|object +_required_|Arbitrary json.|object |**pull_robot** + _optional_|The name of the robot that will be used to pull images.|string |=== @@ -50,3 +50,19 @@ _optional_|The name of the robot that will be used to pull images.|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/activate" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "config": { + "branch": "main" + }, + "pull_robot": "example+robot" + }' +---- \ No newline at end of file diff --git a/modules/api-trigger-deleteBuildTrigger.adoc b/modules/api-trigger-deleteBuildTrigger.adoc index 17aad9b57..becc47dd3 100644 --- a/modules/api-trigger-deleteBuildTrigger.adoc +++ b/modules/api-trigger-deleteBuildTrigger.adoc @@ -16,10 +16,10 @@ Delete the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**trigger_uuid** + _required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string |=== @@ -35,3 +35,12 @@ _required_|The UUID of the build trigger|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-getBuildTrigger.adoc b/modules/api-trigger-getBuildTrigger.adoc index cdab3d32e..c1253d879 100644 --- a/modules/api-trigger-getBuildTrigger.adoc +++ b/modules/api-trigger-getBuildTrigger.adoc @@ -16,10 +16,10 @@ Get information for the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**trigger_uuid** + _required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -35,3 +35,12 @@ _required_|The UUID of the build trigger|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-listBuildTriggers.adoc b/modules/api-trigger-listBuildTriggers.adoc index 40cc3042c..46ab2ebb0 100644 --- a/modules/api-trigger-listBuildTriggers.adoc +++ b/modules/api-trigger-listBuildTriggers.adoc @@ -33,3 +33,12 @@ _required_|The full path of the repository. e.g. 
namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-listTriggerRecentBuilds.adoc b/modules/api-trigger-listTriggerRecentBuilds.adoc index 6b4930d2c..4ec3f6506 100644 --- a/modules/api-trigger-listTriggerRecentBuilds.adoc +++ b/modules/api-trigger-listTriggerRecentBuilds.adoc @@ -16,10 +16,10 @@ List the builds started by the specified trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**trigger_uuid** + _required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -46,3 +46,12 @@ _optional_|The maximum number of builds to return|integer |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/builds?limit=10" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-manuallyStartBuildTrigger.adoc b/modules/api-trigger-manuallyStartBuildTrigger.adoc index e65de5f2f..4a3cd8624 100644 --- a/modules/api-trigger-manuallyStartBuildTrigger.adoc +++ b/modules/api-trigger-manuallyStartBuildTrigger.adoc @@ -16,10 +16,10 @@ Manually start a build from the specified trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. 
namespace/name|string |path|**trigger_uuid** + _required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string |=== @@ -52,3 +52,18 @@ _optional_|(SCM Only) If specified, the ref to build.| |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/start" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "branch_name": "main", + "commit_sha": "abcdef1234567890", + "refs": "refs/heads/main" + }' +---- \ No newline at end of file diff --git a/modules/api-trigger-updateBuildTrigger.adoc b/modules/api-trigger-updateBuildTrigger.adoc index a161b524a..92aa6e0c1 100644 --- a/modules/api-trigger-updateBuildTrigger.adoc +++ b/modules/api-trigger-updateBuildTrigger.adoc @@ -16,10 +16,10 @@ Updates the specified build trigger. [options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] |=== |Type|Name|Description|Schema -|path|**repository** + -_required_|The full path of the repository. e.g. namespace/name|string |path|**trigger_uuid** + _required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string |=== @@ -32,7 +32,7 @@ Options for updating a build trigger |=== |Name|Description|Schema |**enabled** + -_optional_|Whether the build trigger is enabled|boolean +_required_|Whether the build trigger is enabled|boolean |=== @@ -48,3 +48,14 @@ _optional_|Whether the build trigger is enabled|boolean |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"enabled": true}' +---- \ No newline at end of file diff --git a/modules/api-user-createStar.adoc b/modules/api-user-createStar.adoc index 80cecd745..741ae5b9f 100644 --- a/modules/api-user-createStar.adoc +++ b/modules/api-user-createStar.adoc @@ -20,9 +20,9 @@ Star a repository. |=== |Name|Description|Schema |**namespace** + -_optional_|Namespace in which the repository belongs|string +_required_|Namespace in which the repository belongs|string |**repository** + -_optional_|Repository name|string +_required_|Repository name|string |=== @@ -38,3 +38,17 @@ _optional_|Repository name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/user/starred" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "", + "repository": "" + }' +---- \ No newline at end of file diff --git a/modules/api-user-deleteStar.adoc b/modules/api-user-deleteStar.adoc index 3279eae82..10ff5a061 100644 --- a/modules/api-user-deleteStar.adoc +++ b/modules/api-user-deleteStar.adoc @@ -33,3 +33,12 @@ _required_|The full path of the repository. e.g. 
namespace/name|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/user/starred/namespace/repository-name" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-user-getLoggedInUser.adoc b/modules/api-user-getLoggedInUser.adoc index 6c437a07c..1e12e6b18 100644 --- a/modules/api-user-getLoggedInUser.adoc +++ b/modules/api-user-getLoggedInUser.adoc @@ -23,3 +23,12 @@ Get user information for the authenticated user. |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-user-getUserInformation.adoc b/modules/api-user-getUserInformation.adoc index 03f19539b..fdcdaa6f3 100644 --- a/modules/api-user-getUserInformation.adoc +++ b/modules/api-user-getUserInformation.adoc @@ -31,3 +31,12 @@ _required_||string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/users/example_user" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-user-listStarredRepos.adoc b/modules/api-user-listStarredRepos.adoc index 0a113383c..87030d47e 100644 --- a/modules/api-user-listStarredRepos.adoc +++ b/modules/api-user-listStarredRepos.adoc @@ -34,3 +34,12 @@ _optional_|The page token for the next page|string |403|Unauthorized access|<<_apierror,ApiError>> |404|Not found|<<_apierror,ApiError>> |=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/starred?next_page=" \ + -H "Authorization: 
Bearer " +---- \ No newline at end of file diff --git a/modules/arch-georpl-features.adoc b/modules/arch-georpl-features.adoc new file mode 100644 index 000000000..98372e342 --- /dev/null +++ b/modules/arch-georpl-features.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="arch-georpl-features"] += Geo-replication features + +* When geo-replication is configured, container image pushes will be written to the preferred storage engine for that {productname} instance. This is typically the nearest storage backend within the region. + +* After the initial push, image data will be replicated in the background to other storage engines. + +* The list of replication locations is configurable and those can be different storage backends. + +* An image pull will always use the closest available storage engine, to maximize pull performance. + +* If replication has not been completed yet, the pull will use the source storage backend instead. \ No newline at end of file diff --git a/modules/arch-intro-security.adoc b/modules/arch-intro-security.adoc index 3ee1401e5..1e92be54c 100644 --- a/modules/arch-intro-security.adoc +++ b/modules/arch-intro-security.adoc @@ -9,7 +9,7 @@ [id="arch-tls-ssl-config"] == TLS/SSL configuration -You can configure SSL/TLS for the {productname} registry in the configuration tool UI or in the configuration bundle. SSL/TSL connections to the database, to image storage, and to Redis can also be specified through configuration. +You can configure SSL/TLS for the {productname} registry in the configuration tool UI or in the configuration bundle. SSL/TLS connections to the database, to image storage, and to Redis can also be specified through the configuration tool. Sensitive fields in the database and at run time are automatically encrypted. You can also require HTTPS and verify certificates for the {productname} registry during mirror operations. 
diff --git a/modules/arch-mirror-registry.adoc b/modules/arch-mirror-registry.adoc index 46b58627f..68e328c0c 100644 --- a/modules/arch-mirror-registry.adoc +++ b/modules/arch-mirror-registry.adoc @@ -14,4 +14,4 @@ The _mirror registry for Red Hat OpenShift_ is limited to hosting images that ar Unlike {productname}, the _mirror registry for Red Hat OpenShift_ is not a highly-available registry. Only local file system storage is supported. Using the _mirror registry for Red Hat OpenShift_ with more than one cluster is discouraged, because multiple clusters can create a single point of failure when updating your cluster fleet. It is advised to use the _mirror registry for Red Hat OpenShift_ to install a cluster that can host a production-grade, highly available registry such as {productname}, which can serve {ocp} content to other clusters. -More information is available at link:https://docs.openshift.com/container-platform/4.10/installing/disconnected_install/installing-mirroring-creating-registry.html[Creating a mirror registry with _mirror registry for Red Hat OpenShift_]. \ No newline at end of file +More information is available at link:https://docs.openshift.com/container-platform/{ocp-y}/installing/disconnected_install/installing-mirroring-creating-registry.html[Creating a mirror registry with _mirror registry for Red Hat OpenShift_]. 
\ No newline at end of file diff --git a/modules/attributes.adoc b/modules/attributes.adoc index fc5924850..6aefa202a 100644 --- a/modules/attributes.adoc +++ b/modules/attributes.adoc @@ -1,21 +1,25 @@ :productname: Red Hat Quay +:productname-ocp: Red Hat Quay on OpenShift Container Platform +:quayio: Quay.io :productshortname: Quay :imagesdir: ../images :ocp: OpenShift Container Platform +:odf: Red Hat OpenShift Data Foundation :qbo: Quay Bridge Operator :rhel: Red Hat Enterprise Linux (RHEL) :rhel-short: RHEL +:ocp-y: 4.17 ifeval::["{productname}" == "Project Quay"] :upstream: :productname: Project Quay :productversion: 3 -:producty: 3.8 -:productminv: v3.8.1 +:producty: 3.14 +:productminv: v3.14.0 :productrepo: quay.io/projectquay :quayimage: quay :clairimage: clair -:clairproductminv: 4.6.0 +:clairproductminv: 4.8 :builderimage: quay-builder :builderqemuimage: quay-builder-qemu:main :postgresimage: centos/postgresql-10-centos7@sha256:de1560cb35e5ec643e7b3a772ebaac8e3a7a2a8e8271d9e91ff023539b4dfb33 @@ -26,16 +30,17 @@ ifeval::["{productname}" == "Red Hat Quay"] :downstream: :productname: Red Hat Quay :productversion: 3 -:producty: 3.8 -:productmin: 3.8.2 -:productminv: v3.8.2 +:producty: 3.14 +:producty-n1: 3.13 +:productmin: 3.14.0 +:productminv: v3.14.0 :productrepo: registry.redhat.io/quay -:clairnewver: v3.8 +:clairnewver: v3.14 :quayimage: quay-rhel8 :clairimage: clair-rhel8 -:clairproductminv: 4.6.0 +:clairproductminv: 4.8 :builderimage: quay-builder-rhel8 :builderqemuimage: quay-builder-qemu-rhcos -:postgresimage: registry.redhat.io/rhel8/postgresql-13:1-109 +:postgresimage: registry.redhat.io/rhel8/postgresql-13 :redisimage: registry.redhat.io/rhel8/redis-6:1-110 endif::[] diff --git a/modules/authentication-troubleshooting-issues.adoc b/modules/authentication-troubleshooting-issues.adoc new file mode 100644 index 000000000..c8b8a2c03 --- /dev/null +++ b/modules/authentication-troubleshooting-issues.adoc @@ -0,0 +1,50 @@ +:_content-type: CONCEPT 
+[id="authentication-troubleshooting-issues"] += Troubleshooting {productname} authentication and authorization issues for specific users + +Use the following procedure to troubleshoot authentication and authorization issues for specific users. + +.Procedure + +. Exec into the {productname} pod or container. For more information, see "Interacting with the {productname} database". + +. Enter the following command to show all users for external authentication: ++ +[source,terminal] +---- +quay=# select * from federatedlogin; +---- ++ +.Example output ++ +[source,terminal] +---- +id | user_id | service_id | service_ident | metadata_json +----+---------+------------+---------------------------------------------+------------------------------------------- +1 | 1 | 3 | testuser0 | {} +2 | 1 | 8 | PK7Zpg2Yu2AnfUKG15hKNXqOXirqUog6G-oE7OgzSWc | {"service_username": "live.com#testuser0"} +3 | 2 | 3 | testuser1 | {} +4 | 2 | 4 | 110875797246250333431 | {"service_username": "testuser1"} +5 | 3 | 3 | testuser2 | {} +6 | 3 | 1 | 26310880 | {"service_username": "testuser2"} +(6 rows) +---- + +. Verify that the users are inserted into the `user` table: ++ +[source,terminal] +---- +quay=# select username, email from "user"; +---- ++ +.Example output ++ +[source,terminal] +---- +username | email +-----------+---------------------- +testuser0 | testuser0@outlook.com +testuser1 | testuser1@gmail.com +testuser2 | testuser2@redhat.com +(3 rows) +---- \ No newline at end of file diff --git a/modules/authentication-troubleshooting.adoc b/modules/authentication-troubleshooting.adoc new file mode 100644 index 000000000..ca4b997f8 --- /dev/null +++ b/modules/authentication-troubleshooting.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="authentication-troubleshooting"] += Troubleshooting {productname} authentication + +Authentication and authorization is crucial for secure access to {productname}. 
Together, they safeguard sensitive container images, verify user identities, enforce access controls, facilitate auditing and accountability, and enable seamless integration with external identity providers. By prioritizing authentication, organizations can bolster the overall security and integrity of their container registry environment. + +The following authentication methods are supported by {productname}: + +* *Username and password*. Users can authentication by providing their username and password, which are validated against the user database configured in {productname}. This traditional method requires users to enter their credentials to gain access. + +* *OAuth*. {productname} supports OAuth authentication, which allows users to authenticate using their credentials from third party services like Google, GitHub, or Keycloak. OAuth enables a seamless and federated login experience, eliminating the need for separate account creation and simplifying user management. + +* *OIDC*. OpenID Connect enables single sign-on (SSO) capabilities and integration with enterprise identity providers. With OpenID Connect, users can authenticate using their existing organizational credentials, providing a unified authentication experience across various systems and applications. + +* *Token-based authentication*. Users can obtain unique tokens that grant access to specific resources within {productname}. Tokens can be obtained through various means, such as OAuth or by generating API tokens within the {productname} user interface. Token-based authentication is often used for automated or programmatic access to the registry. + +* *External identity provider*. {productname} can integrate with external identity providers, such as LDAP or AzureAD, for authentication purposes. This integration allows organizations to use their existing identity management infrastructure, enabling centralized user authentication and reducing the need for separate user databases. 
\ No newline at end of file diff --git a/modules/automating-quay-using-the-api.adoc b/modules/automating-quay-using-the-api.adoc new file mode 100644 index 000000000..eccf374d6 --- /dev/null +++ b/modules/automating-quay-using-the-api.adoc @@ -0,0 +1,112 @@ +:_content-type: REFERENCE +[id="automating-quay-using-the-api"] += Automating {productname} processes by using the API + +With the API, {productname} administrators and users with access to the API can automate repetitive tasks such as repository management or image pruning. + +The following example shows you how you might use a Python script and a cron job to automate the deletion of OAuth 2 applications _except_ the administrator's token. This might be useful if you want to ensure an application associated with an OAuth 2 access token is cycled after a certain period of time. + +.Prerequisites + +* You have access to the {productname} API, which entails having already created an OAuth 2 access token. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have installed the Python `requests` library using. +* You have enabled cron jobs on your machine. +* You have created several organization applications, including one that will not be deleted. + +.Procedure + +. Create a Python script that executes an API command. The following example is used to delete organization applications using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationapplication[`DELETE /api/v1/organization/{orgname}/applications/{client_id}`] API endpoint. 
++ +.example.py file +[source,python] +---- +import requests <1> + +# Hard-coded values +API_BASE_URL = "http:///api/v1" <2> +ACCESS_TOKEN = "" <3> +ORG_NAME = "" <4> + +def get_all_organization_applications(): + url = f"{API_BASE_URL}/organization/{ORG_NAME}/applications" + headers = { + "Authorization": f"Bearer {ACCESS_TOKEN}" + } + + response = requests.get(url, headers=headers) + + if response.status_code == 200: + try: + applications = response.json() + # Print the raw response for debugging + print("Raw response:", applications) + + # Adjust parsing logic based on the response structure + if isinstance(applications, dict) and 'applications' in applications: + applications = applications['applications'] + + if isinstance(applications, list): + print("Organization applications retrieved successfully:") + for app in applications: + # Updated key from 'title' to 'name' + print(f"Name: {app['name']}, Client ID: {app['client_id']}") + return applications + else: + print("Unexpected response format.") + return [] + except requests.exceptions.JSONDecodeError: + print("Error decoding JSON response:", response.text) + return [] + else: + print(f"Failed to retrieve applications. Status code: {response.status_code}, Response: {response.text}") + return [] + +def delete_organization_application(client_id): + url = f"{API_BASE_URL}/organization/{ORG_NAME}/applications/{client_id}" + headers = { + "Authorization": f"Bearer {ACCESS_TOKEN}" + } + + response = requests.delete(url, headers=headers) + + if response.status_code == 204: + print(f"Application {client_id} deleted successfully.") + else: + print(f"Failed to delete application {client_id}. 
Status code: {response.status_code}, Response: {response.text}") + +def main(): + applications = get_all_organization_applications() + for app in applications: + if app['name'] != "": <5> # Skip the "admin-token-app" + delete_organization_application(app['client_id']) + else: + print(f"Skipping deletion of application: {app['name']}") + +# Execute the main function +main() +---- +<1> Includes the `import` library in your Python code. +<2> The URL of your registry appended with `/api/v1`. +<3> Your OAuth 2 access token. +<4> The organization that holds the application. +<5> The name of the application token to remain. + +. Save the script as `prune_applications.py`. + +. Create a cron job that automatically runs the script: + +.. Open the crontab editor by running the following command: ++ +[source,terminal] +---- +$ crontab -e +---- + +.. In the editor, add the cron job for running the script. The following example runs the script once per month: ++ +[source,text] +---- +0 0 1 * * sudo python /path/to/prune_images.py >> /var/log/prune_images.log 2>&1 +---- + diff --git a/modules/backing-up-and-restoring-intro.adoc b/modules/backing-up-and-restoring-intro.adoc index ff9b30ba7..a1eacfae7 100644 --- a/modules/backing-up-and-restoring-intro.adoc +++ b/modules/backing-up-and-restoring-intro.adoc @@ -1,4 +1,5 @@ -[[backing-up-and-restoring-intro]] +:_content-type: CONCEPT +[id="backing-up-and-restoring-intro"] = Backing up and restoring {productname} managed by the {productname} Operator -Use the content within this section to back up and restore {productname} when managed by the {productname} Operator on OpenShift Container Platform. 
+Use the content within this section to back up and restore {productname} when managed by the {productname} Operator on {ocp} diff --git a/modules/backing-up-red-hat-quay-operator.adoc b/modules/backing-up-red-hat-quay-operator.adoc index 2ef80332f..409bf7809 100644 --- a/modules/backing-up-red-hat-quay-operator.adoc +++ b/modules/backing-up-red-hat-quay-operator.adoc @@ -1,28 +1,42 @@ -[[backing-up-red-hat-quay-operator]] +:_content-type: PROCEDURE +[id="backing-up-red-hat-quay-operator"] = Backing up {productname} -This procedure describes how to create a backup of {productname} deployed on OpenShift Container Platform using the {productname} Operator +Database backups should be performed regularly using either the supplied tools on the PostgreSQL image or your own backup infrastructure. The {productname} Operator does not ensure that the PostgreSQL database is backed up. + +[NOTE] +==== +This procedure covers backing up your {productname} PostgreSQL database. It does not cover backing up the Clair PostgreSQL database. Strictly speaking, backing up the Clair PostgreSQL database is not needed because it can be recreated. If you opt to recreate it from scratch, you will wait for the information to be repopulated after all images inside of your {productname} deployment are scanned. During this downtime, security reports are unavailable. + +If you are considering backing up the Clair PostgreSQL database, you must consider that its size is dependent upon the number of images stored inside of {productname}. As a result, the database can be extremely large. +==== + +This procedure describes how to create a backup of {productname-ocp} using the Operator. .Prerequisites -* A healthy {productname} deployment on OpenShift Container Platform using the {productname} Operator (status condition `Available` is set to `true`) +* A healthy {productname} deployment on {ocp} using the {productname} Operator. The status condition `Available` is set to `true`. 
* The components `quay`, `postgres` and `objectstorage` are set to `managed: true` -* If the component `clair` is set to `managed: true` the component `clairpostgres` is also set to `managed: true` (starting with {productname} Operator v3.7 or later) +* If the component `clair` is set to `managed: true` the component `clairpostgres` is also set to `managed: true` (starting with {productname} v3.7 or later) [NOTE] ==== -If your deployment contains partially unmanaged database or storage components and you are using external services for Postgres or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to create a backup of the data. -You can refer to the tools described in this guide as a starting point on how to backup your external Postgres database or object storage. +If your deployment contains partially unmanaged database or storage components and you are using external services for PostgreSQL or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to create a backup of the data. +You can refer to the tools described in this guide as a starting point on how to backup your external PostgreSQL database or object storage. ==== +[id="quay-configuration-backup"] == {productname} configuration backup +Use the following procedure to back up your {productname} configuration. -. Backup the `QuayRegistry` custom resource by exporting it: +.Procedure + +. To back the `QuayRegistry` custom resource by exporting it, enter the following command: + [source,terminal] ---- -$ oc get quayregistry -n -o yaml > quay-registry.yaml +$ oc get quayregistry -n -o yaml > quay-registry.yaml ---- . Edit the resulting `quayregistry.yaml` and remove the status section and the following metadata fields: @@ -36,19 +50,19 @@ $ oc get quayregistry -n -o yaml > quay-re metadata.uid ---- -. Backup the managed keys secret: +. 
Backup the managed keys secret by entering the following command: + [NOTE] ==== -If you are running a version older than {productname} 3.7.0, this step can be skipped. Some secrets are automatically generated while deploying Quay for the first time. These are stored in a secret called `-quay-registry-managed-secret-keys` in the namespace of the `QuayRegistry` resource. +If you are running a version older than {productname} 3.7.0, this step can be skipped. Some secrets are automatically generated while deploying {productname} for the first time. These are stored in a secret called `-quay_registry_managed_secret_keys` in the namespace of the `QuayRegistry` resource. ==== + [source,terminal] ---- -$ oc get secret -n -quay-registry-managed-secret-keys -o yaml > managed-secret-keys.yaml +$ oc get secret -n _quay_registry_managed_secret_keys -o yaml > managed_secret_keys.yaml ---- -. Edit the the resulting `managed-secret-keys.yaml` file and remove the entry `metadata.ownerReferences`. Your `managed-secret-keys.yaml` file should look similar to the following: +. Edit the resulting `managed_secret_keys.yaml` file and remove the entry `metadata.ownerReferences`. Your `managed_secret_keys.yaml` file should look similar to the following: + [source,yaml] ---- @@ -56,8 +70,8 @@ apiVersion: v1 kind: Secret type: Opaque metadata: - name: -quay-registry-managed-secret-keys - namespace: + name: _quay_registry_managed_secret_keys> + namespace: data: CONFIG_EDITOR_PW: DATABASE_SECRET_KEY: @@ -69,28 +83,35 @@ data: + All information under the `data` property should remain the same. -. Backup the current Quay configuration: +. Redirect the current `Quay` configuration file by entering the following command: + [source,terminal] ---- -$ oc get secret -n $(oc get quayregistry -n -o jsonpath='{.spec.configBundleSecret}') -o yaml > config-bundle.yaml +$ oc get secret -n $(oc get quayregistry -n -o jsonpath='{.spec.configBundleSecret}') -o yaml > config-bundle.yaml ---- -. 
Backup the `/conf/stack/config.yaml` file mounted inside of the Quay pods: +. Backup the `/conf/stack/config.yaml` file mounted inside of the `Quay` pods: + [source,terminal] ---- -$ oc exec -it quay-pod-name -- cat /conf/stack/config.yaml > quay-config.yaml +$ oc exec -it quay_pod_name -- cat /conf/stack/config.yaml > quay_config.yaml ---- -== Scale down your {productname} deployment +[id="scaling-down-quay-deployment"] +== Scaling down your {productname} deployment + +Use the following procedure to scale down your {productname} deployment. [IMPORTANT] ==== -This step is needed to create a consistent backup of the state of your {productname} deployment. Do not omit this step, including in setups where Postgres databases and/or S3-compatible object storage are provided by external services (unmanaged by the Operator). +This step is needed to create a consistent backup of the state of your {productname} deployment. Do not omit this step, including in setups where PostgreSQL databases and/or S3-compatible object storage are provided by external services (unmanaged by the {productname} Operator). ==== -. *For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for {productname}, mirror workers, and Clair (if managed). Your `QuayRegistry` resource should look similar to the following: +.Procedure + +. Depending on the version of your {productname} deployment, scale down your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for {productname}, mirror workers, and Clair (if managed). Your `QuayRegistry` resource should look similar to the following: + [source,yaml] ---- @@ -121,13 +142,25 @@ spec: <1> Disable auto scaling of Quay, Clair and Mirroring workers <2> Set the replica count to 0 for components accessing the database and objectstorage -. 
*For Operator version 3.6 and earlier*: Scale down the {productname} deployment by scaling down the {productname} Operator first and then the managed {productname} resources: +.. *For Operator version 3.6 and earlier*: Scale down the {productname} deployment by scaling down the {productname} registry first and then the managed {productname} resources: + [source,terminal] ---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/^quay-operator/ {print $1}') -n +---- ++ +[source,terminal] +---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-app/ {print $1}') -n +---- ++ +[source,terminal] +---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-mirror/ {print $1}') -n +---- ++ +[source,terminal] +---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/clair-app/ {print $1}') -n ---- @@ -135,7 +168,7 @@ $ oc scale --replicas=0 deployment $(oc get deployment -n |awk ' + [source,terminal] ---- -$ oc get pods -n +$ oc get pods -n ---- + Example output: @@ -143,27 +176,36 @@ Example output: [source,terminal] ---- $ oc get pod - +---- ++ +.Example output ++ +[source,terminal] +---- quay-operator.v3.7.1-6f9d859bd-p5ftc 1/1 Running 0 12m quayregistry-clair-postgres-7487f5bd86-xnxpr 1/1 Running 1 (12m ago) 12m quayregistry-quay-app-upgrade-xq2v6 0/1 Completed 0 12m -quayregistry-quay-config-editor-6dfdcfc44f-hlvwm 1/1 Running 0 73s quayregistry-quay-database-859d5445ff-cqthr 1/1 Running 0 12m quayregistry-quay-redis-84f888776f-hhgms 1/1 Running 0 12m ---- -== {productname} managed database backup +[id="backing-up-managed-database"] +== Backing up the {productname} managed database + +Use the following procedure to back up the {productname} managed database. [NOTE] ==== -If your {productname} deployment is configured with external (unmanged) Postgres database(s), refer to your vendor's documentation on how to create a consistent backup of these databases. 
+If your {productname} deployment is configured with external, or unmanaged, PostgreSQL database(s), refer to your vendor's documentation on how to create a consistent backup of these databases. ==== +.Procedure + . Identify the Quay PostgreSQL pod name: + [source,terminal] ---- -$ oc get pod -l quay-component=postgres -n -o jsonpath='{.items[0].metadata.name}' +$ oc get pod -l quay-component=postgres -n -o jsonpath='{.items[0].metadata.name}' ---- + Example output: @@ -177,7 +219,7 @@ quayregistry-quay-database-59f54bb7-58xs7 + [source,terminal] ---- -$ oc -n rsh $(oc get pod -l app=quay -o NAME -n |head -n 1) cat /conf/stack/config.yaml|awk -F"/" '/^DB_URI/ {print $4}' +$ oc -n rsh $(oc get pod -l app=quay -o NAME -n |head -n 1) cat /conf/stack/config.yaml|awk -F"/" '/^DB_URI/ {print $4}' quayregistry-quay-database ---- @@ -185,12 +227,13 @@ quayregistry-quay-database + [source,terminal] ---- -$ oc exec quayregistry-quay-database-59f54bb7-58xs7 -- /usr/bin/pg_dump -C quayregistry-quay-database > backup.sql +$ oc -n exec quayregistry-quay-database-59f54bb7-58xs7 -- /usr/bin/pg_dump -C quayregistry-quay-database > backup.sql ---- -=== {productname} managed object storage backup +[id="backing-up-managed-object-storage"] +=== Backing up the {productname} managed object storage -The instructions in this section apply to the following configurations: +Use the following procedure to back up the {productname} managed object storage.
The instructions in this section apply to the following configurations: * Standalone, multi-cloud object gateway configurations * OpenShift Data Foundations storage requires that the {productname} Operator provisioned an S3 object storage bucket from, through the ObjectStorageBucketClaim API @@ -200,27 +243,27 @@ The instructions in this section apply to the following configurations: If your {productname} deployment is configured with external (unmanged) object storage, refer to your vendor's documentation on how to create a copy of the content of Quay's storage bucket. ==== -. Decode and export the `AWS_ACCESS_KEY_ID`: +.Procedure + +. Decode and export the `AWS_ACCESS_KEY_ID` by entering the following command: + [source,terminal] ---- $ export AWS_ACCESS_KEY_ID=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_ACCESS_KEY_ID}' |base64 -d) ---- -. Decode and export the `AWS_SECRET_ACCESS_KEY_ID`: +. Decode and export the `AWS_SECRET_ACCESS_KEY_ID` by entering the following command: + [source,terminal] ---- $ export AWS_SECRET_ACCESS_KEY=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_SECRET_ACCESS_KEY}' |base64 -d) ---- -. Create a new directory and copy all blobs to it: +. Create a new directory: + [source,terminal] ---- $ mkdir blobs - -$ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift-storage -o jsonpath='{.spec.host}') s3://$(oc get cm -l app=noobaa -n -o jsonpath='{.items[0].data.BUCKET_NAME}') ./blobs ---- [NOTE] @@ -228,9 +271,19 @@ $ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift- You can also use link:https://rclone.org/[rclone] or link:https://s3tools.org/s3cmd[sc3md] instead of the AWS command line utility. ==== +. 
Copy all blobs to the directory by entering the following command: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift-storage -o jsonpath='{.spec.host}') s3://$(oc get cm -l app=noobaa -n -o jsonpath='{.items[0].data.BUCKET_NAME}') ./blobs +---- + +[id="scaling-up-quay-deployment"] == Scale the {productname} deployment back up -. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers and Clair as applicable. Your `QuayRegistry` resource should look similar to the following: +. Depending on the version of your {productname} deployment, scale up your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers and Clair as applicable. Your `QuayRegistry` resource should look similar to the following: + [source,yaml] ---- @@ -255,18 +308,18 @@ spec: <1> Re-enables auto scaling of Quay, Clair and Mirroring workers again (if desired) <2> Replica overrides are removed again to scale the Quay components back up -. *For Operator version 3.6 and earlier:* Scale up the {productname} deployment by scaling up the {productname} Operator again: +.. *For Operator version 3.6 and earlier:* Scale up the {productname} deployment by scaling up the {productname} registry: + [source,terminal] ---- -$ oc scale --replicas=1 deployment $(oc get deployment -n | awk '/^quay-operator/ {print $1}') -n +$ oc scale --replicas=1 deployment $(oc get deployment -n | awk '/^quay-operator/ {print $1}') -n ---- -. Check the status of the {productname} deployment: +. 
Check the status of the {productname} deployment by entering the following command: + [source,terminal] ---- -$ oc wait quayregistry registry --for=condition=Available=true -n +$ oc wait quayregistry registry --for=condition=Available=true -n ---- + Example output: diff --git a/modules/backing-up-red-hat-quay-standalone.adoc b/modules/backing-up-red-hat-quay-standalone.adoc index b9061e5c8..89d413e99 100644 --- a/modules/backing-up-red-hat-quay-standalone.adoc +++ b/modules/backing-up-red-hat-quay-standalone.adoc @@ -102,6 +102,7 @@ DISTRIBUTED_STORAGE_CONFIG: s3_access_key: s3_secret_key: host: + s3_region: ---- . Export the `AWS_ACCESS_KEY_ID` by using the `access_key` credential obtained in Step 7: diff --git a/modules/branding-quay-deployment.adoc b/modules/branding-quay-deployment.adoc new file mode 100644 index 000000000..d5cc433dd --- /dev/null +++ b/modules/branding-quay-deployment.adoc @@ -0,0 +1,27 @@ +:_content-type: PROCEDURE +[id="branding-quay-deployment"] += Branding a {productname} deployment on the legacy UI + +You can brand the UI of your {productname} deployment by changing the registry title, logo, footer image, and by directing users to a website embedded in the footer image. + +.Procedure + +. Update your {productname} `config.yaml` file to add the following parameters: ++ +[source,yaml] +---- +BRANDING: + logo: <1> + footer_img: <2> + footer_url: <3> +--- +REGISTRY_TITLE: <4> +REGISTRY_TITLE_SHORT: <5> +---- +<1> The URL of the image that will appear at the top of your {productname} deployment. +<2> The URL of the image that will appear at the bottom of your {productname} deployment. +<3> The URL of the website that users will be directed to when clicking the footer image. +<4> The long-form title for the registry. This is displayed in the frontend of your {productname} deployment, for example, at the sign-in page of your organization. +<5> The short-form title for the registry.
The title is displayed on various pages of your organization, for example, as the title of the tutorial on your organization's *Tutorial* page. + +. Restart your {productname} deployment. After restarting, your {productname} deployment is updated with a new logo, footer image, and footer image URL. \ No newline at end of file diff --git a/modules/build-enhanced-arch.adoc b/modules/build-enhanced-arch.adoc index 32f284c6b..f123f588a 100644 --- a/modules/build-enhanced-arch.adoc +++ b/modules/build-enhanced-arch.adoc @@ -1,7 +1,8 @@ -[[red-hat-quay-builds-architecture]] +:_content-type: PROCEDURE +[id="red-hat-quay-builds-architecture"] = {productname} enhanced build architecture -The preceding image shows the expected design flow and architecture of the enhanced build features: +The following image shows the expected design flow and architecture of the enhanced build features: image:quay-builds-architecture.png[Enhanced Quay builds architecture] diff --git a/modules/build-enhancements.adoc b/modules/build-enhancements.adoc index 238755827..ed740c5e4 100644 --- a/modules/build-enhancements.adoc +++ b/modules/build-enhancements.adoc @@ -1,6 +1,27 @@ -[[red-hat-quay-builders-enhancement]] -= {productname} build enhancements +:_content-type: PROCEDURE +[id="red-hat-quay-builders-enhancement"] += Virtual builds with {productname-ocp} -Prior to {productname} 3.7, Quay ran `podman` commands in virtual machines launched by pods. Running builds on virtual platforms requires enabling nested virtualization, which is not featured in Red Hat Enterprise Linux or OpenShift Container Platform. As a result, builds had to run on bare-metal clusters, which is an inefficient use of resources. +ifeval::["{context}" == "use-quay"] +Documentation for the _builds_ feature has been moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/builders_and_image_automation/index[Builders and image automation]. 
This chapter will be removed in a future version of {productname}. +endif::[] -With {productname} 3.7., the bare-metal constraint required to run builds has been removed by adding an additional build option which does not contain the virtual machine layer. As a result, builds can be run on virtualized platforms. Backwards compatibility to run previous build configurations are also available. +ifeval::["{context}" == "operator-features"] +Documentation for the _builds_ feature has been moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/builders_and_image_automation/index[Builders and image automation]. This chapter will be removed in a future version of {productname}. +endif::[] + +ifeval::["{context}" == "quay-builders-image-automation"] +The procedures in this section explain how to create an environment for _virtual builds_ for {productname-ocp}. + + +_Virtual builds_ can be run on virtualized machines with {productname-ocp}. With this method, the _build manager_ first creates the `Job Object` resource. Then, the `Job Object` creates a pod using the `quay-builder-image`. The `quay-builder-image` contains the `quay-builder` binary and the Podman service. The created pod runs as `unprivileged`. The `quay-builder` binary then builds the image while communicating status and retrieving build information from the _build manager_. + +[id="quay-builds-limitations"] +== Virtual builds limitations + +The following limitations apply to the _virtual builds_ feature: + +* Running _virtual builds_ with {productname-ocp} in an unprivileged context might cause some commands that were working under the previous build strategy to fail. Attempts to change the build strategy could potentially cause performance issues and reliability with the build. + +* Running _virtual builds_ directly in a container does not have the same isolation as using virtual machines.
Changing the build environment might also cause builds that were previously working to fail. +endif::[] diff --git a/modules/build-limitations.adoc b/modules/build-limitations.adoc index 57e611fb9..c1b6d8f02 100644 --- a/modules/build-limitations.adoc +++ b/modules/build-limitations.adoc @@ -1,6 +1,7 @@ -[[red-hat-quay-build-limitations]] +:_content-type: CONCEPT +[id="red-hat-quay-build-limitations"] = {productname} build limitations Running builds in {productname} in an unprivileged context might cause some commands that were working under the previous build strategy to fail. Attempts to change the build strategy could potentially cause performance issues and reliability with the build. -Running builds directly in a container will not have the same isolation as using virtual machines. Changing the build environment might also caused builds that were previously working to fail. +Running builds directly in a container does not have the same isolation as using virtual machines. Changing the build environment might also cause builds that were previously working to fail. diff --git a/modules/build-logs-not-loading.adoc b/modules/build-logs-not-loading.adoc new file mode 100644 index 000000000..34774312f --- /dev/null +++ b/modules/build-logs-not-loading.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="build-logs-not-loading"] += Build logs are not loading + +In some cases, attempting to load logs for a repository build results in only a throbber icon, and no logs are displayed. This typically occurs when you are using a browser equipped with one of the following extensions: AdBlock, uBlock, or Privacy Badger. These browser extensions can cause the loading of build logs to be cancelled. To resolve this issue, disable the browser extension and reload the page.
\ No newline at end of file diff --git a/modules/build-pre-configuration.adoc b/modules/build-pre-configuration.adoc new file mode 100644 index 000000000..258a88ec4 --- /dev/null +++ b/modules/build-pre-configuration.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="build-pre-configuration"] += Setting up {productname} builders with {ocp} + +You must pre-configure {productname-ocp} to allow the use of the _builder_ workers before using the _builds_ feature. \ No newline at end of file diff --git a/modules/build-trigger-error.adoc b/modules/build-trigger-error.adoc new file mode 100644 index 000000000..2a7b5b3fc --- /dev/null +++ b/modules/build-trigger-error.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="build-trigger-error"] += Unable to add a build trigger + +In some cases, attempting to add a build trigger results in the following error message: `You are not admin on the SCM repository`. In order for {productname} to add the webhook callback necessary for Build Triggers, the user granting {productname} access to the SCM repository must have administrative access on that repository. \ No newline at end of file diff --git a/modules/build-trigger-overview.adoc b/modules/build-trigger-overview.adoc new file mode 100644 index 000000000..bda4be983 --- /dev/null +++ b/modules/build-trigger-overview.adoc @@ -0,0 +1,7 @@ +:_content-type: PROCEDURE +[id="build-trigger-overview"] += Build triggers + +_Build triggers_ are automated mechanisms that start a container image build when specific conditions are met, such as changes to source code, updates to dependencies, or link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/#webhook[creating a webhook call]. These triggers help automate the image-building process and ensure that the container images are always up-to-date without manual intervention. 
+ +The following sections cover content related to creating a build trigger, tag naming conventions, how to skip a source control-triggered build, starting a _build_, or manually triggering a _build_. diff --git a/modules/builders-virtual-environment.adoc b/modules/builders-virtual-environment.adoc index 8c34eb6a8..8934a3086 100644 --- a/modules/builders-virtual-environment.adoc +++ b/modules/builders-virtual-environment.adoc @@ -1,106 +1,93 @@ -:_content-type: CONCEPT +:_content-type: PROCEDURE [id="builders-virtual-environment"] -= Creating a {productname} builders environment with {ocp} += Configuring virtual builds for {productname-ocp} -The procedures in this section explain how to create a {productname} virtual builders environment with {ocp}. - -[id="openshift-tls-component"] -== {ocp} TLS component - -The `tls` component allows you to control TLS configuration. +The procedures in this section explain how to create an environment for _virtual builds_ for {productname-ocp}. [NOTE] ==== -{productname} {producty} does not support builders when the TLS component is managed by the Operator. +* If you are using Amazon Web Service (AWS) S3 storage, you must modify your storage bucket in the AWS console, prior to running builders. See "Modifying your AWS S3 storage bucket" in the following section for the required parameters. +* If you are using a Google Cloud Platform (GCP) object bucket, you must configure cross-origin resource sharing (CORS) to enable _virtual builds_. ==== -If you set `tls` to `unmanaged`, you supply your own `ssl.cert` and `ssl.key` files. In this instance, if you want your cluster to support builders, you must add both the Quay route and the builder route name to the SAN list in the cert, or use a wildcard. 
- -To add the builder route, use the following format: - -[source,bash] ----- -[quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name]:443 ----- - -[id="red-hat-quay-quota-builders-establishment"] -== Using {ocp} for {productname} builders +.Prerequisites -Builders require SSL/TLS certificates. For more information about SSL/TLS certificates, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#using_ssl_to_protect_connections_to_red_hat_quay[Adding TLS certificates to the {productname} container]. - -If you are using Amazon Web Service (AWS) S3 storage, you must modify your storage bucket in the AWS console, prior to running builders. See "Modifying your AWS S3 storage bucket" in the following section for the required parameters. - -[id="red-hat-quay-setting-up-builders"] -=== Preparing {ocp} for virtual builders - -Use the following procedure to prepare {ocp} for {productname} virtual builders. - -[NOTE] -==== -* This procedure assumes you already have a cluster provisioned and a Quay Operator running. -* This procedure is for setting up a virtual namespace on OpenShift Container Platform. -==== +* You have an {ocp} cluster provisioned with the {productname} Operator running. +* You have set the `tls` component to `unmanaged` and uploaded custom SSL/TLS certificates to the {productname} Operator. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}]. +* You have configured the {ocp} TLS component for builds. +* You are logged into {ocp} as a cluster administrator. .Procedure -. Log in to your {productname} cluster using a cluster administrator account. - -. Create a new project where your virtual builders will be run, for example, `virtual-builders`, by running the following command: +. 
Create a new project where your virtual builders will be run, for example, `virtual-builders`, by running the following command: + [source,terminal] ---- $ oc new-project virtual-builders ---- -. Create a `ServiceAccount` in the project that will be used to run builds by entering the following command: +. Create a `ServiceAccount` in the project that will be used to run _builds_ by entering the following command: + [source,terminal] ---- $ oc create sa -n virtual-builders quay-builder ---- ++ +.Example output ++ +[source,terminal] +---- +serviceaccount/quay-builder created +---- -. Provide the created service account with editing permissions so that it can run the build: +. Provide the created service account with editing permissions so that it can run a _build_: + [source,terminal] ---- $ oc adm policy -n virtual-builders add-role-to-user edit system:serviceaccount:virtual-builders:quay-builder ---- ++ +.Example output ++ +[source,terminal] +---- +clusterrole.rbac.authorization.k8s.io/edit added: "system:serviceaccount:virtual-builders:quay-builder" +---- -. Grant the Quay builder `anyuid scc` permissions by entering the following command: +. Grant the _builder_ worker `anyuid scc` permissions by entering the following command. This requires cluster administrator privileges, which is required because _builders_ must run as the Podman user for unprivileged or rootless builds to work. + [source,terminal] ---- $ oc adm policy -n virtual-builders add-scc-to-user anyuid -z quay-builder ---- + -[NOTE] -==== -This action requires cluster admin privileges. This is required because builders must run as the Podman user for unprivileged or rootless builds to work. -==== - -. Obtain the token for the Quay builder service account. - -.. 
If using {ocp} 4.10 or an earlier version, enter the following command: + [source,terminal] ---- -oc sa get-token -n virtual-builders quay-builder +clusterrole.rbac.authorization.k8s.io/system:openshift:scc:anyuid added: "quay-builder" ---- -.. If using {ocp} 4.11 or later, enter the following command: +. Obtain the token for the _builder_ service account by entering the following command: + [source,terminal] ---- $ oc create token quay-builder -n virtual-builders ---- + +[NOTE] +==== +When the token expires you will need to request a new token. Optionally, you can also add a custom expiration. For example, specify `--duration 20160m` to retain the token for two weeks. +==== ++ .Example output ++ [source,terminal] ---- eyJhbGciOiJSUzI1NiIsImtpZCI6IldfQUJkaDVmb3ltTHZ0dGZMYjhIWnYxZTQzN2dJVEJxcDJscldSdEUtYWsifQ... ---- -. Determine the builder route by entering the following command: +. Determine the _builder_ route by entering the following command: + [source,terminal] ---- @@ -110,13 +97,16 @@ $ oc get route -n quay-enterprise .Example output [source,terminal] ---- -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -... -example-registry-quay-builder example-registry-quay-builder-quay-enterprise.apps.docs.quayteam.org example-registry-quay-app grpc edge/Redirect None -... +NAME: example-registry-quay-builder +HOST/PORT: example-registry-quay-builder-quay-enterprise.apps.stevsmit-cluster-new.gcp.quaydev.org +PATH: +SERVICES: example-registry-quay-app +PORT: grpc +TERMINATION: passthrough/Redirect +WILDCARD: None ---- -. Generate a self-signed SSL/TlS certificate with the .crt extension by entering the following command: +. Generate a self-signed SSL/TLS certificate with the `.crt` extension by entering the following command: + [source,terminal] ---- @@ -124,19 +114,30 @@ $ oc extract cm/kube-root-ca.crt -n openshift-apiserver ---- + .Example output ++ [source,terminal] ---- ca.crt ----
Rename the `ca.crt` file to `extra_ca_cert_build_cluster.crt` by entering the following command: +. Rename the `ca.crt` file to `build-cluster.crt` by entering the following command: + [source,terminal] ---- -$ mv ca.crt extra_ca_cert_build_cluster.crt +$ mv ca.crt build-cluster.crt ---- -. Locate the secret for you configuration bundle in the *Console*, and select *Actions* -> *Edit Secret* and add the appropriate builder configuration: +. Update the `config.yaml` file of your {productname-ocp} deployment to include an appropriate _virtual builds_ configuration by using the {ocp} web console. + +.. Click *Operators* -> *Installed Operators* -> *Red Hat Quay* -> *Quay Registry*. + +.. Click the name of your registry, for example, *example-registry*. + +.. Under *Config Bundle Secret*, click the name of your configuration bundle, for example, *extra-ca-certificate-config-bundle-secret*. + +.. Click *Actions* -> *Edit Secret*. + +.. Add an appropriate _virtual builds_ configuration using the following as a reference: + [source,yaml] ---- @@ -163,37 +164,39 @@ BUILD_MANAGER: NAME: openshift BUILDER_NAMESPACE: <4> SETUP_TIME: 180 - MINIMUM_RETRY_THRESHOLD: - BUILDER_CONTAINER_IMAGE: <5> + MINIMUM_RETRY_THRESHOLD: 0 + BUILDER_CONTAINER_IMAGE: quay.io/projectquay/quay-builder:{producty} # Kubernetes resource options - K8S_API_SERVER: <6> - K8S_API_TLS_CA: <7> + K8S_API_SERVER: <5> + K8S_API_TLS_CA: <6> VOLUME_SIZE: 8G KUBERNETES_DISTRIBUTION: openshift - CONTAINER_MEMORY_LIMITS: 300m <8> - CONTAINER_CPU_LIMITS: 1G <9> - CONTAINER_MEMORY_REQUEST: 300m <10> - CONTAINER_CPU_REQUEST: 1G <11> + CONTAINER_MEMORY_LIMITS: 1G <7> + CONTAINER_CPU_LIMITS: 300m <8> + CONTAINER_MEMORY_REQUEST: 1G <9> + CONTAINER_CPU_REQUEST: 300m <10> NODE_SELECTOR_LABEL_KEY: "" NODE_SELECTOR_LABEL_VALUE: "" SERVICE_ACCOUNT_NAME: - SERVICE_ACCOUNT_TOKEN: <12> ----- -+ -<1> The build route is obtained by running `oc get route -n` with the name of your OpenShift Operator's namespace. 
A port must be provided at the end of the route, and it should use the following format: `[quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name]:443`. -<2> If the `JOB_REGISTRATION_TIMEOUT` parameter is set too low, you might receive the following error: `failed to register job to build manager: rpc error: code = Unauthenticated desc = Invalid build token: Signature has expired`. It is suggested that this parameter be set to at least 240. -<3> If your Redis host has a password or SSL/TLS certificates, you must update accordingly. -<4> Set to match the name of your virtual builders namespace, for example, `virtual-builders`. -<5> For early access, the `BUILDER_CONTAINER_IMAGE` is currently `quay.io/projectquay/quay-builder:3.7.0-rc.2`. Note that this might change during the early access window. If this happens, customers are alerted. -<6> The `K8S_API_SERVER` is obtained by running `oc cluster-info`. -<7> You must manually create and add your custom CA cert, for example, `K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build_cluster.crt`. -<8> Defaults to `5120Mi` if left unspecified. -<9> For virtual builds, you must ensure that there are enough resources in your cluster. Defaults to `1000m` if left unspecified. -<10> Defaults to `3968Mi` if left unspecified. -<11> Defaults to `500m` if left unspecified. -<12> Obtained when running `oc create sa`. -+ -.Sample configuration + SERVICE_ACCOUNT_TOKEN: <11> + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: +---- ++ +<1> The build route is obtained by running `$ oc get route -n` with the namespace of your {productname-ocp} deployment. A port must be provided at the end of the route, and it should use the following format: `[quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name]:443`. 
+<2> If the `JOB_REGISTRATION_TIMEOUT` parameter is set too low, you might receive the following error: `failed to register job to build manager: rpc error: code = Unauthenticated desc = Invalid build token: Signature has expired`. This parameter should be set to at least `240`. +<3> If your Redis host has a password or SSL/TLS certificates, you must update this field accordingly. +<4> Set to match the name of your _virtual builds_ namespace. This example used `virtual-builders`. +<5> The `K8S_API_SERVER` is obtained by running `$ oc cluster-info`. +<6> You must manually create and add your custom CA cert, for example, `K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build-cluster.crt`. +<7> Defaults to `5120Mi` if left unspecified. +<8> For _virtual builds_, you must ensure that there are enough resources in your cluster. Defaults to `1000m` if left unspecified. +<9> Defaults to `3968Mi` if left unspecified. +<10> Defaults to `500m` if left unspecified. +<11> Obtained when running `$ oc create sa`. 
++ +.Example _virtual builds_ configuration [source,yaml] ---- FEATURE_USER_INITIALIZE: true @@ -219,381 +222,26 @@ BUILD_MANAGER: NAME: openshift BUILDER_NAMESPACE: virtual-builders SETUP_TIME: 180 - MINIMUM_RETRY_THRESHOLD: - BUILDER_CONTAINER_IMAGE: quay.io/projectquay/quay-builder:3.7.0-rc.2 + MINIMUM_RETRY_THRESHOLD: 0 + BUILDER_CONTAINER_IMAGE: quay.io/projectquay/quay-builder:{producty} # Kubernetes resource options K8S_API_SERVER: api.docs.quayteam.org:6443 - K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build_cluster.crt + K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build-cluster.crt VOLUME_SIZE: 8G KUBERNETES_DISTRIBUTION: openshift CONTAINER_MEMORY_LIMITS: 1G - CONTAINER_CPU_LIMITS: 1080m + CONTAINER_CPU_LIMITS: 300m CONTAINER_MEMORY_REQUEST: 1G - CONTAINER_CPU_REQUEST: 580m + CONTAINER_CPU_REQUEST: 300m NODE_SELECTOR_LABEL_KEY: "" NODE_SELECTOR_LABEL_VALUE: "" SERVICE_ACCOUNT_NAME: quay-builder SERVICE_ACCOUNT_TOKEN: "eyJhbGciOiJSUzI1NiIsImtpZCI6IldfQUJkaDVmb3ltTHZ0dGZMYjhIWnYxZTQzN2dJVEJxcDJscldSdEUtYWsifQ" + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: ---- -[id="red-hat-quay-manual-ssl-for-builders"] -=== Manually adding SSL/TLS certificates - -Due to a known issue with the configuration tool, you must manually add your custom SSL/TLS certificates to properly run builders. Use the following procedure to manually add custom SSL/TLS certificates. - -For more information creating SSL/TLS certificates, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#using_ssl_to_protect_connections_to_red_hat_quay[Adding TLS certificates to the {productname} container]. - - -[id="create-sign-certificates"] -==== Creating and signing certificates - -Use the following procedure to create and sign an SSL/TLS certificate. - -.Procedure - -* Create a certificate authority and sign a certificate. 
For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/advanced_red_hat_quay_deployment#create-a-ca-and-sign-a-certificate[Create a Certificate Authority and sign a certificate]. -+ -.openssl.cnf -[source,terminal] ----- -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -subjectAltName = @alt_names -[alt_names] -DNS.1 = example-registry-quay-quay-enterprise.apps.docs.quayteam.org <1> -DNS.2 = example-registry-quay-builder-quay-enterprise.apps.docs.quayteam.org <2> ----- -<1> An `alt_name` for the URL of your {productname} registry must be included. -<2> An `alt_name` for the `BUILDMAN_HOSTNAME` -+ -.Sample commands -[source,terminal] ----- -$ openssl genrsa -out rootCA.key 2048 -$ openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 1024 -out rootCA.pem -$ openssl genrsa -out ssl.key 2048 -$ openssl req -new -key ssl.key -out ssl.csr -$ openssl x509 -req -in ssl.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out ssl.cert -days 356 -extensions v3_req -extfile openssl.cnf ----- - -[id="setting-tls-unmanaged"] -==== Setting TLS to unmanaged - -Use the following procedure to set `king:tls` to unmanaged. - -.Procedure - -. In your {productname} Registry YAML, set `kind: tls` to `managed: false`: -+ -[source,yaml] ----- - - kind: tls - managed: false ----- - -. On the *Events* page, the change is blocked until you set up the appropriate `config.yaml` file. 
For example: -+ -[source,yaml] ----- - - lastTransitionTime: '2022-03-28T12:56:49Z' - lastUpdateTime: '2022-03-28T12:56:49Z' - message: >- - required component `tls` marked as unmanaged, but `configBundleSecret` - is missing necessary fields - reason: ConfigInvalid - status: 'True' - ----- - -[id="creating-temporary-secrets"] -==== Creating temporary secrets - -Use the following procedure to create temporary secrets for the CA certificate. - -.Procedure - -. Create a secret in your default namespace for the CA certificate: -+ ----- -$ oc create secret generic -n quay-enterprise temp-crt --from-file extra_ca_cert_build_cluster.crt ----- - -. Create a secret in your default namespace for the `ssl.key` and `ssl.cert` files: -+ ----- -$ oc create secret generic -n quay-enterprise quay-config-ssl --from-file ssl.cert --from-file ssl.key ----- - -[id="copying-secret-data-to-config"] -==== Copying secret data to the configuration YAML - -Use the following procedure to copy secret data to your `config.yaml` file. - -.Procedure - -. Locate the new secrets in the console UI at *Workloads* -> *Secrets*. - -. For each secret, locate the YAML view: -+ -[source,yaml] ----- -kind: Secret -apiVersion: v1 -metadata: - name: temp-crt - namespace: quay-enterprise - uid: a4818adb-8e21-443a-a8db-f334ace9f6d0 - resourceVersion: '9087855' - creationTimestamp: '2022-03-28T13:05:30Z' -... -data: - extra_ca_cert_build_cluster.crt: >- - LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURNakNDQWhxZ0F3SUJBZ0l.... -type: Opaque ----- -+ -[source,yaml] ----- -kind: Secret -apiVersion: v1 -metadata: - name: quay-config-ssl - namespace: quay-enterprise - uid: 4f5ae352-17d8-4e2d-89a2-143a3280783c - resourceVersion: '9090567' - creationTimestamp: '2022-03-28T13:10:34Z' -... -data: - ssl.cert: >- - LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVaakNDQTA2Z0F3SUJBZ0lVT... - ssl.key: >- - LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBc... -type: Opaque ----- +.. 
Click *Save* on the *Edit Secret* page. -. Locate the secret for your {productname} registry configuration bundle in the UI, or through the command line by running a command like the following: -+ -[source,terminal] ----- -$ oc get quayregistries.quay.redhat.com -o jsonpath="{.items[0].spec.configBundleSecret}{'\n'}" -n quay-enterprise ----- - -. In the {ocp} console, select the YAML tab for your configuration bundle secret, and add the data from the two secrets you created: -+ -[source,yaml] ----- -kind: Secret -apiVersion: v1 -metadata: - name: init-config-bundle-secret - namespace: quay-enterprise - uid: 4724aca5-bff0-406a-9162-ccb1972a27c1 - resourceVersion: '4383160' - creationTimestamp: '2022-03-22T12:35:59Z' -... -data: - config.yaml: >- - RkVBVFVSRV9VU0VSX0lOSVRJQUxJWkU6IHRydWUKQlJ... - extra_ca_cert_build_cluster.crt: >- - LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURNakNDQWhxZ0F3SUJBZ0ldw.... - ssl.cert: >- - LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVaakNDQTA2Z0F3SUJBZ0lVT... - ssl.key: >- - LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBc... -type: Opaque ----- - -. Click *Save*. - -. Enter the following command to see if your pods are restarting: -+ -[source,terminal] ----- -$ oc get pods -n quay-enterprise ----- -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE -... 
-example-registry-quay-app-6786987b99-vgg2v 0/1 ContainerCreating 0 2s -example-registry-quay-app-7975d4889f-q7tvl 1/1 Running 0 5d21h -example-registry-quay-app-7975d4889f-zn8bb 1/1 Running 0 5d21h -example-registry-quay-app-upgrade-lswsn 0/1 Completed 0 6d1h -example-registry-quay-config-editor-77847fc4f5-nsbbv 0/1 ContainerCreating 0 2s -example-registry-quay-config-editor-c6c4d9ccd-2mwg2 1/1 Running 0 5d21h -example-registry-quay-database-66969cd859-n2ssm 1/1 Running 0 6d1h -example-registry-quay-mirror-764d7b68d9-jmlkk 1/1 Terminating 0 5d21h -example-registry-quay-mirror-764d7b68d9-jqzwg 1/1 Terminating 0 5d21h -example-registry-quay-redis-7cc5f6c977-956g8 1/1 Running 0 5d21h ----- - -. After your {productname} registry has reconfigured, enter the following command to check if the {productname} app pods are running: -+ -[source,terminal] ----- -$ oc get pods -n quay-enterprise ----- -+ -.Example output -[source,terminal] ----- -example-registry-quay-app-6786987b99-sz6kb 1/1 Running 0 7m45s -example-registry-quay-app-6786987b99-vgg2v 1/1 Running 0 9m1s -example-registry-quay-app-upgrade-lswsn 0/1 Completed 0 6d1h -example-registry-quay-config-editor-77847fc4f5-nsbbv 1/1 Running 0 9m1s -example-registry-quay-database-66969cd859-n2ssm 1/1 Running 0 6d1h -example-registry-quay-mirror-758fc68ff7-5wxlp 1/1 Running 0 8m29s -example-registry-quay-mirror-758fc68ff7-lbl82 1/1 Running 0 8m29s -example-registry-quay-redis-7cc5f6c977-956g8 1/1 Running 0 5d21h ----- - -. In your browser, access the registry endpoint and validate that the certificate has been updated appropriately. For example: -+ -[source,terminal] ----- -Common Name (CN) example-registry-quay-quay-enterprise.apps.docs.quayteam.org -Organisation (O) DOCS -Organisational Unit (OU) QUAY ----- - -[id="red-hat-quay-builders-ui"] -=== Using the UI to create a build trigger - -Use the following procedure to use the UI to create a build trigger. - -.Procedure - -. Log in to your {productname} repository. - -. 
Click *Create New Repository* and create a new registry, for example, `testrepo`. - -. On the *Repositories* page, click the *Builds* tab on the navigation pane. Alternatively, use the corresponding URL directly: -+ ----- -https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/repository/quayadmin/testrepo?tab=builds ----- -+ -[IMPORTANT] -==== -In some cases, the builder might have issues resolving hostnames. This issue might be related to the `dnsPolicy` being set to `default` on the job object. Currently, there is no workaround for this issue. It will be resolved in a future version of {productname}. -==== - -. Click *Create Build Trigger* -> *Custom Git Repository Push*. - -. Enter the HTTPS or SSH style URL used to clone your Git repository, then click *Continue*. For example: -+ ----- -https://github.com/gabriel-rh/actions_test.git ----- - -. Check *Tag manifest with the branch or tag name* and then click *Continue*. - -. Enter the location of the Dockerfile to build when the trigger is invoked, for example, `/Dockerfile` and click *Continue*. - -. Enter the location of the context for the Docker build, for example, `/`, and click *Continue*. - -. If warranted, create a Robot Account. Otherwise, click *Continue*. - -. Click *Continue* to verify the parameters. - -. On the *Builds* page, click *Options* icon of your Trigger Name, and then click *Run Trigger Now*. - -. Enter a commit SHA from the Git repository and click *Start Build*. - -. You can check the status of your build by clicking the commit in the *Build History* page, or by running `oc get pods -n virtual-builders`. 
For example: -+ ----- -$ oc get pods -n virtual-builders ----- -+ -.Example output ----- -NAME READY STATUS RESTARTS AGE -f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s ----- -+ -[source,terminal] ----- -$ oc get pods -n virtual-builders ----- -+ -.Example output ----- -NAME READY STATUS RESTARTS AGE -f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Terminating 0 9s ----- -+ ----- -$ oc get pods -n virtual-builders ----- -+ -.Example output ----- -No resources found in virtual-builders namespace. ----- - -. When the build is finished, you can check the status of the tag under *Tags* on the navigation pane. -+ -[NOTE] -==== -With early access, full build logs and timestamps of builds are currently unavailable. -==== - - -[id="red-hat-quay-s3-bucket-modify"] -=== Modifying your AWS S3 storage bucket - -If you are using AWS S3 storage, you must change your storage bucket in the AWS console, prior to running builders. - -.Procedure - -. Log in to your AWS console at link:https://s3.console.aws.amazon.com[s3.console.aws.com]. - -. In the search bar, search for `S3` and then click *S3*. - -. Click the name of your bucket, for example, `myawsbucket`. - -. Click the *Permissions* tab. - -. Under *Cross-origin resource sharing (CORS)*, include the following parameters: -+ -[source,yaml] ----- - [ - { - "AllowedHeaders": [ - "Authorization" - ], - "AllowedMethods": [ - "GET" - ], - "AllowedOrigins": [ - "*" - ], - "ExposeHeaders": [], - "MaxAgeSeconds": 3000 - }, - { - "AllowedHeaders": [ - "Content-Type", - "x-amz-acl", - "origin" - ], - "AllowedMethods": [ - "PUT" - ], - "AllowedOrigins": [ - "*" - ], - "ExposeHeaders": [], - "MaxAgeSeconds": 3000 - } - ] ----- +. Restart your {productname-ocp} registry with the new configuration. 
\ No newline at end of file diff --git a/modules/builds-overview.adoc b/modules/builds-overview.adoc new file mode 100644 index 000000000..8a6a89ada --- /dev/null +++ b/modules/builds-overview.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="builds-overview"] += {productname} builds overview + +_{productname} builds_, or just _builds_, are a feature that enable the automation of container image builds. The _builds_ feature uses worker nodes to build images from Dockerfiles or other build specifications. These builds can be triggered manually or automatically via webhooks from repositories like GitHub, allowing users to integrate continuous integration (CI) and continuous delivery (CD) pipelines into their workflow. + +The _builds_ feature is supported on {productname-ocp} and Kubernetes clusters. For Operator-based deployments and Kubernetes clusters, _builds_ are created by using a _build manager_ that coordinates and handles the build jobs. _Builds_ support building Dockerfile on both bare metal platforms and on virtualized platforms with _virtual builders_. This versatility allows organizations to adapt to existing infrastructure while leveraging {productname}'s container image build capabilities. + +The key features of _{productname} builds_ feature include: + +* Automated builds triggered by code commits or version control events +* Support for Docker and Podman container images +* Fine-grained control over build environments and resources +* Integration with Kubernetes and {ocp} for scalable builds +* Compatibility with bare metal and virtualized infrastructure + +[NOTE] +==== +Running _builds_ directly in a container on bare metal platforms does not have the same isolation as when using virtual machines, however, it still provides good protection. 
+==== + +_Builds_ are highly complex, and administrators are encouraged to review the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_architecture/index#arch-intro-build-automation[Build automation] architecture guide before continuing. \ No newline at end of file diff --git a/modules/cannot-access-private-repo.adoc b/modules/cannot-access-private-repo.adoc new file mode 100644 index 000000000..35b20d351 --- /dev/null +++ b/modules/cannot-access-private-repo.adoc @@ -0,0 +1,26 @@ +:_content-type: CONCEPT +[id="cannot-access-private-repo"] += Unable to access private repositories using Amazon EC2 Container Service + +In some cases, authentication fails while attempting to use Amazon Elastic Container Service (ECS). This error occurs when the authentication configuration in the `ecs.config` file is missing. + +In order for ECS to pull down Docker images, the following information must be included in the ECS configuration file that is located in the `/etc/ecs/ecs.config` file: + +[source,yaml] +---- +ECS_ENGINE_AUTH_TYPE=dockercfg +ECS_ENGINE_AUTH_DATA={"https://quay.io": {"auth": "YOURAUTHTOKENFROMDOCKERCFG", "email": "user@example.com"}} +---- + +If you are using a robot account, you must include the username: + +[source,terminal] +---- +ECS_ENGINE_AUTH_TYPE=dockercfg <1> +ECS_ENGINE_AUTH_DATA={"https://quay.io": {"auth": "YOURAUTHTOKENFROMDOCKERCFG", "email": ".", "username": "USERNAME"}} +---- +<1> This field is the contents of the `auths` attribute in `.docker/config.json` starting at Docker version 1.7.0, or the contents of `.dockercfg` before that. + +After you have updated the configuration file, restart the ECS service. + +For more information about ECS, see link:https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html[Private registry authentication for tasks]. 
\ No newline at end of file diff --git a/modules/cannot-locate-dockerfile.adoc b/modules/cannot-locate-dockerfile.adoc new file mode 100644 index 000000000..b57f663bf --- /dev/null +++ b/modules/cannot-locate-dockerfile.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="cannot-locate-dockerfile"] += Unable to locate specified Dockerfile + +When building an image, the following error is returned: `A build step failed: API error (500): Cannot locate specified Dockerfile: Dockerfile`. This occurs for one of two reasons: + +* *The `.dockerignore` file contains the Dockerfile.* Unlike Docker Hub, the Dockerfile is part of the Build Context on {productname}. The Dockerfile must not appear in the `.dockerignore` file. Remove the Dockerfile from the `.dockerignore` file to resolve the issue. + +* *The build trigger is incorrect.* Verify the Dockerfile location and the branch or tag value specified in the build trigger. \ No newline at end of file diff --git a/modules/cannot-reach-registry-endpoint.adoc b/modules/cannot-reach-registry-endpoint.adoc new file mode 100644 index 000000000..870cfc140 --- /dev/null +++ b/modules/cannot-reach-registry-endpoint.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="cannot-reach-registry-endpoint"] += Unable to reach registry endpoint + +In some cases, trying to pull a Docker image returns the following error: `Could not reach any registry endpoint`. This usually occurs because you are attempting to pull a non-existent tag. If you do not specify a tag, newer versions of Docker attempt to pull the "latest" tag, regardless of whether it actually exists. 
\ No newline at end of file diff --git a/modules/changing-storage-solution.adoc b/modules/changing-storage-solution.adoc new file mode 100644 index 000000000..924c071d3 --- /dev/null +++ b/modules/changing-storage-solution.adoc @@ -0,0 +1,12 @@ +:_content-type: PROCEDURE +[id="changing-storage-solution"] += Unable to change storage solution for Quay pods + +In some cases, there are 2 persistent volume claims (PVCs) present in the `Quay` namespace, and the `Quay` pod is binding to the incorrect PVC instead of the expected one. When attempting to manually change the PVC to the desired storage solution, it might revert back to the incorrect storage solution. Because the storage class used by the local PVC is automatically set as the local PVC, your local PVC is selected over the {productname} PVC. + +As a workaround for this issue, you can change the default storage class to one that controls the desired persistent volume claim. Afterwards, the {productname} Operator, by default, refers to the PVC created by the default storage class. See the {ocp} documentation for link:https://docs.openshift.com/container-platform/{ocp-y}/storage/dynamic-provisioning.html#change-default-storage-class_dynamic-provisioning[Changing the default storage class] to resolve this issue. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6202532[Can't change Storage Solution for Quay pods]. \ No newline at end of file diff --git a/modules/clair-concepts.adoc b/modules/clair-concepts.adoc index 835535f77..fecc969db 100644 --- a/modules/clair-concepts.adoc +++ b/modules/clair-concepts.adoc @@ -16,12 +16,18 @@ A Clair analysis is broken down into three distinct parts: indexing, matching, a [id="clair-indexing-concept"] === Indexing -Clair's indexer service is responsible for indexing a manifest. In Clair, manifests are representations of a container image. 
The indexer service is the component that Clair uses to understand the contents of layers. Clair leverages the fact that Open Container Initiative (OCI) manifests and layers are content addressed to reduce duplicate work. +Clair's indexer service plays a crucial role in understanding the makeup of a container image. In Clair, container image representations are called "manifests." Manifests are used to comprehend the contents of the image's layers. To streamline this process, Clair takes advantage of the fact that Open Container Initiative (OCI) manifests and layers are designed for content addressing, reducing repetitive tasks. -Indexing involves taking a manifest representing a container image and computing its constituent parts. The indexer tries to discover what packages exist in the image, what distribution the image is derived from, and what package repositories are used within the image. When this information is computed, it is persisted into an `IndexReport`. +During indexing, a manifest that represents a container image is taken and broken down into its essential components. The indexer's job is to uncover the image's contained packages, its origin distribution, and the package repositories it relies on. This valuable information is then recorded and stored within Clair's database. The insights gathered during indexing serve as the basis for generating a comprehensive vulnerability report. This report can be seamlessly transferred to a matcher node for further analysis and action, helping users make informed decisions about their container images' security. +ifeval::["{context}" == "quay-io"] +The `IndexReport` is stored in Clair's database. In {quayio}, it is automatically fed to a `matcher` node to compute the vulnerability report after an image is pushed to a repository. +endif::[] +ifeval::["{context}" == "clair"] The `IndexReport` is stored in Clair's database. It can be fed to a `matcher` node to compute the vulnerability report. 
+endif::[] +//// [id="content-addressability"] ==== Content addressability @@ -31,19 +37,31 @@ For example, consider how many images in a registry might use `ubuntu:artful` as In some cases, Clair should re-index a manifest. For example, when an internal component such as a package scanner is updated, Clair performs the analysis with the new package scanner. Clair has enough information to determine that a component has changed and that the `IndexReport` might be different the second time, and as a result it re-indexes the manifest. +ifeval::["{context}" == "clair"] A client can track Clair's `index_state` endpoint to understand when an internal component has changed, and can subsequently issue re-indexes. See the Clair API guide to learn how to view Clair's API specification. +//// [id="clair-matching-concept"] === Matching -With Clair, a matcher node is responsible for matching vulnerabilities to a provided `IndexReport`. +With Clair, a matcher node is responsible for matching vulnerabilities to a provided index report. -Matchers are responsible for keeping the database of vulnerabilities up to date. Matchers will typically run a set of updaters, which periodically probe their data sources for new content. New vulnerabilities are stored in the database when they are discovered. +Matchers are responsible for keeping the database of vulnerabilities up to date. Matchers run a set of updaters, which periodically probe their data sources for new content. New vulnerabilities are stored in the database when they are discovered. +The matcher API is designed to always provide the most recent vulnerability report when queried. The vulnerability report summarizes both a manifest's content and any vulnerabilities affecting the content. + +ifeval::["{context}" == "quay-io"] +On {quayio}, this interval is set to 6 hours. +endif::[] +New vulnerabilities are stored in the database when they are discovered. 
+ +ifeval::["{context}" == "clair"] The matcher API is designed to be used often. It is designed to always provide the most recent `VulnerabilityReport` when queried. The `VulnerabilityReport` summarizes both a manifest's content and any vulnerabilities affecting the content. +endif::[] // See. . . to learn more about how to view the Clair API specification and to work with the matcher API. +//// [id="remote-matching"] ==== Remote matching @@ -52,16 +70,22 @@ A remote matcher acts similar to a matcher, however remote matchers use API call The CRDA remote matcher is responsible for fetching vulnerabilities from Red Hat Code Ready Dependency Analytics (CRDA). By default, this matcher serves 100 requests per minute. The rate limiting can be lifted by requesting a dedicated API key, which is done by submitting link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form]. To enable CRDA remote matching, see "Enabling CRDA for Clair". +//// [id="clair-notifications-concept"] -=== Notifications +=== Notifier service +ifeval::["{context}" == "quay-io"] +By default, the notifier service on {quayio} is disabled. As a result, repository owners cannot setup notifications when new CVEs are reported. However, when CVE databases are updated, which is every 6 hours on {quayio}, new vulnerabilities affecting previously indexed manifests are automatically updated. As a result, manual re-scans are not required, and users can view new CVEs directly on {quayio}. See "Viewing Clair security scans" for more information. +endif::[] + +ifeval::["{context}" == "clair"] Clair uses a notifier service that keeps track of new security database updates and informs users if new or removed vulnerabilities affect an indexed manifest. When the notifier becomes aware of new vulnerabilities affecting a previously indexed manifest, it uses the configured methods in your `config.yaml` file to issue notifications about the new changes. 
Returned notifications express the most severe vulnerability discovered because of the change. This avoids creating excessive notifications for the same security database update. When a user receives a notification, it issues a new request against the matcher to receive an up to date vulnerability report. - +//// The notification schema is the JSON marshalled form of the following types: [source,json] ---- { @@ -90,6 +114,7 @@ type VulnSummary struct { Links string `json:"links"` } ---- +//// You can subscribe to notifications through the following mechanics: @@ -99,6 +124,7 @@ You can subscribe to notifications through the following mechanics: Configuring the notifier is done through the Clair YAML configuration file. +//// [id=webhook-delivery] ==== Webhook delivery @@ -112,7 +138,7 @@ When the notifier has determined an updated security database has been changed t [source,json] ---- { - "notifiction_id": {uuid_string}, + "notification_id": {uuid_string}, "callback": {url_to_notifications} } ---- @@ -149,5 +175,7 @@ The notifier runs in `NOTIFIER_TEST_MODE` until the environment variable is clea ==== Deleting notifications To delete the notification, you can use the `DELETE` API call. Deleting a notification ID manually cleans up resources in the notifier. If you do not use the `DELETE` API call, the notifier waits a predetermined length of time before clearing delivered notifications from its database. +endif::[] -// For more information on the `DELETE` API call, see. . . \ No newline at end of file +// For more information on the `DELETE` API call, see. . . +//// \ No newline at end of file diff --git a/modules/clair-distroless-container-images.adoc b/modules/clair-distroless-container-images.adoc new file mode 100644 index 000000000..b728a2c4d --- /dev/null +++ b/modules/clair-distroless-container-images.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="clair-distroless-container-images"] += Does Clair support scanning of distro-less container images? 
+ +Support for scanning distro-less containers was added in Clair 4.6.1. This feature is not present in earlier versions. For Clair on the {productname} Operator, this feature was released with {productname} 3.8.7. \ No newline at end of file diff --git a/modules/clair-openshift-airgap-database-standalone.adoc b/modules/clair-openshift-airgap-database-standalone.adoc index cfd03e0e0..535e5c59d 100644 --- a/modules/clair-openshift-airgap-database-standalone.adoc +++ b/modules/clair-openshift-airgap-database-standalone.adoc @@ -47,17 +47,18 @@ $ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 543 ---- indexer: connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1> - scanlock_retry: 10 layer_scan_concurrency: 5 migrations: true + scanlock_retry: 10 + airgap: true scanner: - repo: - rhel-repository-scanner: <2> - repo2cpe_mapping_file: /data/cpe-map.json - package: - rhel_containerscanner: <3> - name2repos_mapping_file: /data/repo-map.json + repo: + rhel-repository-scanner: <2> + repo2cpe_mapping_file: /data/repository-to-cpe.json + package: + rhel_containerscanner: <3> + name2repos_mapping_file: /data/container-name-repos-map.json ---- <1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`. <2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information". -<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information". \ No newline at end of file +<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information". 
diff --git a/modules/clair-openshift-airgap-database.adoc b/modules/clair-openshift-airgap-database.adoc index c0ca82b1e..6f1de127e 100644 --- a/modules/clair-openshift-airgap-database.adoc +++ b/modules/clair-openshift-airgap-database.adoc @@ -47,16 +47,17 @@ $ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 543 ---- indexer: connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1> - scanlock_retry: 10 layer_scan_concurrency: 5 migrations: true + scanlock_retry: 10 + airgap: true scanner: - repo: - rhel-repository-scanner: <2> - repo2cpe_mapping_file: /data/cpe-map.json - package: - rhel_containerscanner: <3> - name2repos_mapping_file: /data/repo-map.json + repo: + rhel-repository-scanner: <2> + repo2cpe_mapping_file: /data/repository-to-cpe.json + package: + rhel_containerscanner: <3> + name2repos_mapping_file: /data/container-name-repos-map.json ---- <1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`. <2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information". 
diff --git a/modules/clair-openshift-manual.adoc b/modules/clair-openshift-manual.adoc index 4610a5e48..a7f990dfe 100644 --- a/modules/clair-openshift-manual.adoc +++ b/modules/clair-openshift-manual.adoc @@ -121,7 +121,6 @@ indexer: matcher: connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable max_conn_pool: 100 - run: "" migrations: true indexer_addr: clair-indexer notifier: @@ -138,7 +137,8 @@ trace: name: "jaeger" probability: 1 jaeger: - agent_endpoint: "localhost:6831" + agent: + endpoint: "localhost:6831" service_name: "clair" metrics: name: "prometheus" @@ -236,6 +236,7 @@ $ oc create -f ./clair-combo.yaml ---- FEATURE_SECURITY_NOTIFICATIONS: true FEATURE_SECURITY_SCANNER: true +FEATURE_SECURITY_SCANNER_NOTIFY_ON_NEW_INDEX: true SECURITY_SCANNER_V4_ENDPOINT: <1> SECURITY_SCANNER_V4_PSK: <2> ---- diff --git a/modules/clair-openshift.adoc b/modules/clair-openshift.adoc index cd4cb3f17..401ba8a27 100644 --- a/modules/clair-openshift.adoc +++ b/modules/clair-openshift.adoc @@ -6,4 +6,4 @@ [id="clair-quay-operator-overview"] = Clair on {ocp} -To set up Clair v4 (Clair) on a {productname} deployment on {ocp}, it is recommended to use the {productname} Operator. By default, the {productname} Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically. +To set up Clair v4 (Clair) on a {productname} deployment on {ocp}, it is recommended to use the {productname} Operator. By default, the {productname} Operator installs or upgrades a Clair deployment along with your {productname} deployment and configure Clair automatically. 
diff --git a/modules/clair-postgresql-database-update.adoc b/modules/clair-postgresql-database-update.adoc new file mode 100644 index 000000000..c9b4e49ee --- /dev/null +++ b/modules/clair-postgresql-database-update.adoc @@ -0,0 +1,99 @@ +[id="upgrading-clair-postgresql-database"] += Upgrading the Clair PostgreSQL database + +If you are upgrading {productname} to version 13, you must migrate your Clair PostgreSQL database version from PostgreSQL version 13 -> version 15. This requires bringing down your Clair PostgreSQL 13 database and running a migration script to initiate the process. + +Use the following procedure to upgrade your Clair PostgreSQL database from version 13 -> to version 15. + +[IMPORTANT] +==== +Clair security scans might become temporarily disrupted after the migration procedure has succeeded. +==== + +.Procedure + +. Stop the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Stop the Clair container by running the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Run the following Podman process from SCLOrg's link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration] procedure, which allows for data migration from a remote PostgreSQL server: ++ +[source,terminal] +---- +$ sudo podman run -d --name <1> + -e POSTGRESQL_MIGRATION_REMOTE_HOST= \ <2> + -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \ + -v \ <3> + [ OPTIONAL_CONFIGURATION_VARIABLES ] + registry.redhat.io/rhel8/postgresql-15 +---- ++ +<1> Insert a name for your Clair PostgreSQL 15 migration database. +<2> Your new Clair PostgreSQL 15 database container IP address. Can obtained by running the following command: `sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay`. 
+<3> You must specify a different volume mount point than the one from your initial Clair PostgreSQL 13 deployment, and modify the access control lists for said directory. For example: ++ +[source,terminal] +---- +$ mkdir -p /host/data/clair-postgresql15-directory +---- ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /host/data/clair-postgresql15-directory +---- ++ +This prevents data from being overwritten by the new container. + +. Stop the Clair PostgreSQL 13 container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. After completing the PostgreSQL migration, run the Clair PostgreSQL 15 container, using the new data volume mount from Step 3, for example, ``: ++ +[source,terminal] +---- +$ sudo podman run -d --rm --name \ + -e POSTGRESQL_USER= \ + -e POSTGRESQL_PASSWORD= \ + -e POSTGRESQL_DATABASE= \ + -e POSTGRESQL_ADMIN_PASSWORD= \ + -p 5433:5432 \ + -v \ + registry.redhat.io/rhel8/postgresql-15 +---- + +. Start the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 --name=quay \ +-v /home//quay-poc/config:/conf/stack:Z \ +-v /home//quay-poc/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- + +. Start the Clair container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +registry.redhat.io/quay/clair-rhel8:{productminv} +---- + +For more information, see link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration]. 
\ No newline at end of file diff --git a/modules/clair-severity-mapping.adoc b/modules/clair-severity-mapping.adoc new file mode 100644 index 000000000..0d3865f29 --- /dev/null +++ b/modules/clair-severity-mapping.adoc @@ -0,0 +1,165 @@ + +// Module included in the following assemblies: +// +// clair/master.adoc +//quayio/master.adoc + +:_content-type: CONCEPT +[id="clair-severity-mapping"] += Clair severity mapping + +Clair offers a comprehensive approach to vulnerability assessment and management. One of its essential features is the normalization of security databases' severity strings. This process streamlines the assessment of vulnerability severities by mapping them to a predefined set of values. Through this mapping, clients can efficiently react to vulnerability severities without the need to decipher the intricacies of each security database's unique severity strings. These mapped severity strings align with those found within the respective security databases, ensuring consistency and accuracy in vulnerability assessment. + + +[id="clair-severity-strings"] +== Clair severity strings + +Clair alerts users with the following severity strings: + +* Unknown +* Negligible +* Low +* Medium +* High +* Critical + +These severity strings are similar to the strings found within the relevant security database. + +[discrete] +[id="clair-mapping-alpine"] +=== Alpine mapping + +Alpine SecDB database does not provide severity information. All vulnerability severities will be Unknown. + +[cols="1,1",options="header"] +|=== +| Alpine Severity | Clair Severity +| * |Unknown + +|=== + +[discrete] +[id="clair-mapping-aws"] +=== AWS mapping + +AWS UpdateInfo database provides severity information. + +[cols="1,1",options="header"] +|=== +| AWS Severity | Clair Severity +|low |Low +|medium |Medium +|important | High +|critical | Critical +|=== + +[discrete] +[id="clair-mapping-debian"] +=== Debian mapping + +Debian Oval database provides severity information. 
+ +[cols="1,1",options="header"] +|=== +| Debian Severity | Clair Severity +| * | Unknown +|Unimportant | Low +| Low | Medium +| Medium | High +| High | Critical +|=== + +[discrete] +[id="clair-mapping-oracle"] +=== Oracle mapping + +Oracle Oval database provides severity information. + +[cols="1,1",options="header"] +|=== +| Oracle Severity | Clair Severity +|N/A | Unknown +|LOW | Low +|MODERATE | Medium +|IMPORTANT | High +|CRITICAL | Critical + +|=== + +[discrete] +[id="clair-mapping-rhel"] +=== RHEL mapping + +RHEL Oval database provides severity information. + +[cols="1,1",options="header"] +|=== +| RHEL Severity | Clair Severity +|None | Unknown +|Low | Low +|Moderate | Medium +|Important | High +|Critical | Critical + +|=== + +[discrete] +[id="clair-mapping-suse"] +=== SUSE mapping + +SUSE Oval database provides severity information. + +[cols="1,1",options="header"] +|=== +| Severity | Clair Severity +|None | Unknown +|Low | Low +|Moderate | Medium +|Important | High +|Critical | Critical +|=== + +[discrete] +[id="clair-mapping-ubuntu"] +=== Ubuntu mapping + +Ubuntu Oval database provides severity information. 
+ +[cols="1,1",options="header"] +|=== +| Severity | Clair Severity +|Untriaged |Unknown +|Negligible | Negligible +|Low | Low +|Medium | Medium +|High | High +|Critical | Critical +|=== + +[discrete] +[id="clair-mapping-osv"] +=== OSV mapping + +.CVSSv3 +[cols="2,2",options="header"] +|=== +| Base Score | Clair Severity +|0.0 | Negligible +|0.1-3.9 | Low +|4.0-6.9 | Medium +|7.0-8.9 | High +|9.0-10.0 | Critical + +|=== + +.CVSSv2 + +[cols="2,2",options="header"] +|=== +| Base Score | Clair Severity +|0.0-3.9 | Low +|4.0-6.9 | Medium +|7.0-10 | High + +|=== + diff --git a/modules/clair-standalone-config.adoc b/modules/clair-standalone-config.adoc index 844258b6a..7fa219cd3 100644 --- a/modules/clair-standalone-config.adoc +++ b/modules/clair-standalone-config.adoc @@ -18,7 +18,6 @@ indexer: matcher: connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable max_conn_pool: 100 - run: "" migrations: true indexer_addr: clair-indexer notifier: @@ -35,7 +34,8 @@ trace: name: "jaeger" probability: 1 jaeger: - agent_endpoint: "localhost:6831" + agent: + endpoint: "localhost:6831" service_name: "clair" metrics: name: "prometheus" diff --git a/modules/clair-standalone-configure.adoc b/modules/clair-standalone-configure.adoc index fcb640b80..116246c10 100644 --- a/modules/clair-standalone-configure.adoc +++ b/modules/clair-standalone-configure.adoc @@ -24,7 +24,7 @@ $ mkdir /home//quay-poc/postgres-clairv4 $ setfacl -m u:26:-wx /home//quay-poc/postgres-clairv4 ---- -. Deploy a Clair Postgres database by entering the following command: +. 
Deploy a Clair PostgreSQL database by entering the following command: + [source,terminal] ---- @@ -33,16 +33,16 @@ $ sudo podman run -d --name postgresql-clairv4 \ -e POSTGRESQL_PASSWORD=clairpass \ -e POSTGRESQL_DATABASE=clair \ -e POSTGRESQL_ADMIN_PASSWORD=adminpass \ - -p 5433:5433 \ + -p 5433:5432 \ -v /home//quay-poc/postgres-clairv4:/var/lib/pgsql/data:Z \ - registry.redhat.io/rhel8/postgresql-10:1 + registry.redhat.io/rhel8/postgresql-15 ---- -. Install the Postgres `uuid-ossp` module for your Clair deployment: +. Install the PostgreSQL `uuid-ossp` module for your Clair deployment: + [source,terminal] ---- -$ podman exec -it postgresql-clairv4 /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"" | psql -d clair -U postgres' +$ sudo podman exec -it postgresql-clairv4 /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"" | psql -d clair -U postgres' ---- + .Example output @@ -53,19 +53,19 @@ CREATE EXTENSION + [NOTE] ==== -Clair requires the `uuid-ossp` extension to be added to its Postgres database. For users with proper privileges, creating the extension will automatically be added by Clair. If users do not have the proper privileges, the extension must be added before start Clair. +Clair requires the `uuid-ossp` extension to be added to its PostgreSQL database. For users with proper privileges, the extension is created automatically by Clair. If users do not have the proper privileges, the extension must be added before starting Clair. If the extension is not present, the following error will be displayed when Clair attempts to start: `ERROR: Please load the "uuid-ossp" extension. (SQLSTATE 42501)`. ==== . 
Stop the `Quay` container if it is running and restart it in configuration mode, loading the existing configuration as a volume: + -[source,terminal] +[subs="verbatim,attributes"] ---- $ sudo podman run --rm -it --name quay_config \ -p 80:8080 -p 443:8443 \ -v $QUAY/config:/conf/stack:Z \ - registry.redhat.io/quay/quay-rhel8:v3.8.2 config secret + {productrepo}/{quayimage}:{productminv} config secret ---- . Log in to the configuration tool and click *Enable Security Scanning* in the *Security Scanner* section of the UI. @@ -115,7 +115,6 @@ indexer: matcher: connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable max_conn_pool: 100 - run: "" migrations: true indexer_addr: clair-indexer notifier: @@ -132,7 +131,8 @@ trace: name: "jaeger" probability: 1 jaeger: - agent_endpoint: "localhost:6831" + agent: + endpoint: "localhost:6831" service_name: "clair" metrics: name: "prometheus" diff --git a/modules/clair-standalone-database.adoc b/modules/clair-standalone-database.adoc index 688a16c63..17b9c56b3 100644 --- a/modules/clair-standalone-database.adoc +++ b/modules/clair-standalone-database.adoc @@ -3,7 +3,7 @@ Clair requires a Postgres database. You can share a common database between Quay and Clair if Quay is also using Postgres, but in this example a separate, Clair-specific database is deployed. -In this proof-of-concept scenario, you will use a directory on the local file system to persist database data. +In this proof of concept scenario, you will use a directory on the local file system to persist database data. . 
In the installation folder, denoted here by the variable $QUAY, create a directory for the Clair database data and set the permissions appropriately: + diff --git a/modules/clair-standalone-quay-config.adoc b/modules/clair-standalone-quay-config.adoc index db2e55355..d3c1e1033 100644 --- a/modules/clair-standalone-quay-config.adoc +++ b/modules/clair-standalone-quay-config.adoc @@ -38,8 +38,11 @@ FEATURE_SECURITY_NOTIFICATIONS: true FEATURE_SECURITY_SCANNER: true ... SECURITY_SCANNER_INDEXING_INTERVAL: 30 +SECURITY_SCANNER_V4_MANIFEST_CLEANUP: true SECURITY_SCANNER_V4_ENDPOINT: http://quay-server.example.com:8081 SECURITY_SCANNER_V4_PSK: MTU5YzA4Y2ZkNzJoMQ== SERVER_HOSTNAME: quay-server.example.com +SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE: 8G <1> ... ---- +<1> The recommended maximum of `SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE` is `10G`. diff --git a/modules/clair-standalone-upgrade.adoc b/modules/clair-standalone-upgrade.adoc new file mode 100644 index 000000000..7cf7ef06c --- /dev/null +++ b/modules/clair-standalone-upgrade.adoc @@ -0,0 +1,38 @@ +:_content-type: CONCEPT +[id="clair-standalone-upgrade"] += Using Clair with an upstream image for {productname} + +For most users, independent upgrades of Clair from the current version ({clairproductminv}) are unnecessary. In some cases, however, customers might want to pull an image of Clair from the link:https://quay.io/repository/projectquay/clair[upstream repository] for various reasons, such as for specific bug fixes or to try new features that have not yet been released downstream. You can use the following procedure to run an upstream version of Clair with {productname}. + +[IMPORTANT] +==== +Upstream versions of Clair have not been fully tested for compatibility with {productname}. As a result, this combination might cause issues with your deployment. +==== + +.Procedure + +. Enter the following command to stop Clair if it is running: ++ +[source,terminal] +---- +$ podman stop +---- + +. 
Navigate to the link:https://quay.io/repository/projectquay/clair[upstream repository], find the version of Clair that you want to use, and pull it to your local machine. For example: ++ +[source,terminal] +---- +$ podman pull quay.io/projectquay/clair:nightly-2024-02-03 +---- + +. Start Clair by using the container image, mounting in the configuration from the file you created: ++ +[source,terminal] +---- +$ podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +-v /etc/opt/clairv4/config:/clair:Z \ +quay.io/projectquay/clair:nightly-2024-02-03 +---- diff --git a/modules/clair-testing.adoc b/modules/clair-testing.adoc index 98fb0b854..c1d79ca6f 100644 --- a/modules/clair-testing.adoc +++ b/modules/clair-testing.adoc @@ -47,4 +47,9 @@ image:clair-reposcan.png[Security scan information appears for scanned repositor . Click the image report, for example, *45 medium*, to show a more detailed report: + .Report details -image:clair-vulnerabilities.png[See all vulnerabilities or only those that are fixable] \ No newline at end of file +image:clair-vulnerabilities.png[See all vulnerabilities or only those that are fixable] ++ +[NOTE] +==== +In some cases, Clair shows duplicate reports on images, for example, `ubi8/nodejs-12` or `ubi8/nodejs-16`. This occurs because vulnerabilities with the same name are for different packages. This behavior is expected with Clair vulnerability reporting and will not be addressed as a bug. +==== \ No newline at end of file diff --git a/modules/clair-troubleshooting-issues.adoc b/modules/clair-troubleshooting-issues.adoc new file mode 100644 index 000000000..50914530e --- /dev/null +++ b/modules/clair-troubleshooting-issues.adoc @@ -0,0 +1,173 @@ +:_content-type: PROCEDURE +[id="clair-troubleshooting-issues"] += Troubleshooting Clair issues + +Use the following procedures to troubleshoot Clair. 
+ +[id="verify-image-compatibility"] +== Verifying image compatibility + +If you are using Clair, ensure that the images you are trying to scan are supported by Clair. Clair has certain requirements and does not support all image formats or configurations. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/clair-vulnerability-scanner#clair-vulnerability-scanner-hosts[Clair vulnerability databases]. + +[id="allowlist-clair-updaters"] +== Allowlisting Clair updaters + +If you are using Clair behind a proxy configuration, you must allowlist the updaters in your proxy or firewall configuration. For more information about updater URLs, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/clair-concepts#clair-updater-urls[Clair updater URLs]. + +[id="clair-dependencies-update"] +== Updating Clair scanner and its dependencies + +Ensure that you are using the latest version of Clair security scanner. Outdated versions might lack support for newer image formats, or might have known issues. + +Use the following procedure to check your version of Clair. + +[NOTE] +==== +Checking Clair logs can also be used to check if there are any errors from the updaters microservice in your Clair logs. By default, Clair updates the vulnerability database every 30 minutes. +==== + +.Procedure + +. Check your version of Clair. + +.. If you are running Clair on {productname-ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc logs clair-pod +---- + +.. 
If you are running a standalone deployment of {productname} and using a Clair container, enter the following command: ++ +[source,terminal] +---- +$ podman logs clair-container +---- ++ +.Example output ++ +[source,terminal] +---- +"level":"info", +"component":"main", +"version":"v4.5.1", +---- + +[id="enabling-debug-mode-clair"] +== Enabling debug mode for Clair + +By default, debug mode for Clair is disabled. You can enable debug mode for Clair by updating your `clair-config.yaml` file. + +.Prerequisites + +* For Clair on {productname-ocp} deployments, you must follow the procedure in link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#custom-clair-configuration-managed-database[Running a custom Clair configuration with a managed Clair database]. + +Use the following procedure to enable debug mode for Clair. + +.Procedure + +. Update your `clair-config.yaml` file to include the debug option. + +.. On standalone {productname} deployments: + +... Add the following configuration field to your `clair-config.yaml` file: ++ +[source,yaml] +---- +log_level: debug +---- + +... Restart your Clair deployment by entering the following command: ++ +[source,terminal] +---- +$ podman restart +---- + +.. On {productname-ocp} deployments: + +... On the {ocp} web console, click *Operators* -> *Installed Operators* -> *Quay Registry*. + +... Click the name of your registry, for example, *Example Registry*. You are redirected to the *Details* page of your registry. + +... Click the Config Bundle Secret, for example, *example-registry-config-bundle-xncls*. + +... Confirm that you are running a custom Clair configuration by looking for the `clair-config.yaml` file under the *Data* section of the *Details* page of your secret. + +... If you have a `clair-config.yaml` file, click *Actions* -> *Edit Secret*. If you do not, see "Running a custom Clair configuration with a managed Clair database". + +... 
Update your `clair-config.yaml` file to include the `log_level: debug` configuration variable. For example: ++ +[source,yaml] +---- +log_level: debug +---- + +... Click *Save*. + +... You can check the status of your Clair deployment by clicking *Workloads* -> *Pods*. The `clair-app` pod should report `1/1` under the *Ready* category. + +... You can confirm that Clair is returning debugging information by clicking the *clair-app* pod that is ready -> *Logs*. + +[id="checking-clair-configuration"] +== Checking Clair configuration + +Check your Clair `config.yaml` file to ensure that there are no misconfigurations or inconsistencies that could lead to issues. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#config-fields-overview[Clair configuration overview]. + +[id="inspect-image-metadata"] +== Inspect image metadata + +In some cases, you might receive an *Unsupported* message. This might indicate that the scanner is unable to extract the necessary metadata from the image. Check if the image metadata is properly formatted and accessible. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/7018077[Troubleshooting Clair]. + + +//// +[id="check-logs-updaters-errors"] +== Checking logs for updaters errors + +Check if there are any errors from the updaters microservice in your Clair logs. By default, Clair updates the vulnerability database every 30 minutes. + +Use the following procedure to check your Clair logs. + +.Procedure + +. Check your Clair logs. + +.. If you are running Clair on the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc logs clair-pod +---- + +.. 
If you are running a standalone deployment of {productname} and using a Clair container, enter the following command: ++ +[source,terminal] +---- +$ podman logs clair-container +---- + + +[id="updating-cve-database"] +== Updating the CVE database + +Updating the CVE database can be a memory and CPU intensive task, especially if there are several CVEs that must be parsed. If the resources are exhausted during this process, the system kernel can stop the offending process. This should be visible in Docker logs, Podman logs, or in the system journal. For example: + +[source,terminal] +---- +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.338195] [26556] 0 26556 734467 386889 4165632 0 937 clair + +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.338227] Memory cgroup out of memory: Kill process 26556 (clair) score 1922 or sacrifice child + +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.339573] Killed process 26556 (clair) total-vm:2937868kB, anon-rss:1536364kB, file-rss:11192kB, shmem-rss:0kB + +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.396171] oom_reaper: reaped process 26556 (clair), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB +---- +//// + diff --git a/modules/clair-updater-urls.adoc b/modules/clair-updater-urls.adoc index b7e30e463..551b61903 100644 --- a/modules/clair-updater-urls.adoc +++ b/modules/clair-updater-urls.adoc @@ -4,19 +4,35 @@ :_content-type: CONCEPT [id="clair-updater-urls"] -= Clair updater URLs += Information about Clair updaters -The following are the HTTP hosts and paths that Clair will attempt to talk to in a default configuration. This list is non-exhaustive. Some servers issue redirects and some request URLs are constructed dynamically. +The following table provides details about each Clair updater, including the configuration parameter, a brief description, relevant URLs, and the associated components that they interact with. 
This list is not exhaustive, and some servers might issue redirects, while certain request URLs are dynamically constructed to ensure accurate vulnerability data retrieval. -* \https://secdb.alpinelinux.org/ -* \http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list -* \https://cdn.amazonlinux.com/2/core/latest/x86_64/mirror.list -* \https://www.debian.org/security/oval/ -* \https://linux.oracle.com/security/oval/ -* \https://packages.vmware.com/photon/photon_oval_definitions/ -* \https://github.com/pyupio/safety-db/archive/ -* \https://catalog.redhat.com/api/containers/ -* \https://www.redhat.com/security/data/ -* \https://support.novell.com/security/oval/ -* \https://people.canonical.com/~ubuntu-security/oval/ +For Clair, each updater is responsible for fetching and parsing vulnerability data related to a specific package type or distribution. For example, the Debian updater focuses on Debian-based Linux distributions, while the AWS updater focuses on vulnerabilities specific to Amazon Web Services' Linux distributions. Understanding the package type is important for vulnerability management because different package types might have unique security concerns and require specific updates and patches. +[NOTE] +==== +If you are using a proxy server in your environment with Clair's updater URLs, you must identify which URL needs to be added to the proxy allowlist to ensure that Clair can access them unimpeded. Use the following table to add updater URLs to your proxy allowlist. +==== + +.Clair updater information +[cols="1a,3a,3a,2a",options="header"] +|=== +|Updater |Description | URLs | Component +|`alpine` |The Alpine updater is responsible for fetching and parsing vulnerability data related to packages in Alpine Linux distributions. 
| * `\https://secdb.alpinelinux.org/` | Alpine Linux SecDB database +|`aws` | The AWS updater is focused on AWS Linux-based packages, ensuring that vulnerability information specific to Amazon Web Services' custom Linux distributions is kept up-to-date. | * `\http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list` +* `\https://cdn.amazonlinux.com/2/core/latest/x86_64/mirror.list` +* `\https://cdn.amazonlinux.com/al2023/core/mirrors/latest/x86_64/mirror.list` | Amazon Web Services (AWS) UpdateInfo +|`debian` |The Debian updater is essential for tracking vulnerabilities in packages associated with Debian-based Linux distributions. | * `\https://deb.debian.org/` +* `\https://security-tracker.debian.org/tracker/data/json` | Debian Security Tracker +|`clair.cvss`| The Clair Common Vulnerability Scoring System (CVSS) updater focuses on maintaining data about vulnerabilities and their associated CVSS scores. This is not tied to a specific package type but rather to the severity and risk assessment of vulnerabilities in general. | * `\https://nvd.nist.gov/feeds/json/cve/1.1/` | National Vulnerability Database (NVD) feed for Common Vulnerabilities and Exposures (CVE) data in JSON format +|`oracle` |The Oracle updater is dedicated to Oracle Linux packages, maintaining data on vulnerabilities that affect Oracle Linux systems. | * `\https://linux.oracle.com/security/oval/com.oracle.elsa-*.xml.bz2` | Oracle Oval database +|`photon`| The Photon updater deals with packages in VMware Photon OS. | * `\https://packages.vmware.com/photon/photon_oval_definitions/` | VMware Photon OS oval definitions +|`rhel` |The Red Hat Enterprise Linux (RHEL) updater is responsible for maintaining vulnerability data for packages in Red Hat's Enterprise Linux distribution. 
| * `\https://access.redhat.com/security/cve/` +* `\https://access.redhat.com/security/data/oval/v2/PULP_MANIFEST` | Red Hat Enterprise Linux (RHEL) Oval database +|`rhcc` | The Red Hat Container Catalog (RHCC) updater is connected to Red Hat's container images. This updater ensures that vulnerability information related to Red Hat's containerized software is kept current. | * `\https://access.redhat.com/security/data/metrics/cvemap.xml` | Red Hat Container Catalog (RHCC) database +|`suse`| The SUSE updater manages vulnerability information for packages in the SUSE Linux distribution family, including openSUSE, SUSE Enterprise Linux, and others. | * `\https://support.novell.com/security/oval/` | SUSE Oval database +|`ubuntu` | The Ubuntu updater is dedicated to tracking vulnerabilities in packages associated with Ubuntu-based Linux distributions. Ubuntu is a popular distribution in the Linux ecosystem. | * `\https://security-metadata.canonical.com/oval/com.ubuntu.*.cve.oval.xml` +* `\https://api.launchpad.net/1.0/` | Ubuntu Oval database +|`osv` | The Open Source Vulnerability (OSV) updater specializes in tracking vulnerabilities within open source software components. OSV is a critical resource that provides detailed information about security issues found in various open source projects. | * `\https://osv-vulnerabilities.storage.googleapis.com/` | Open Source Vulnerabilities database +|=== \ No newline at end of file diff --git a/modules/clair-updaters.adoc b/modules/clair-updaters.adoc index ba0daf8f4..73db138e5 100644 --- a/modules/clair-updaters.adoc +++ b/modules/clair-updaters.adoc @@ -8,88 +8,4 @@ Clair uses `Go` packages called _updaters_ that contain the logic of fetching and parsing different vulnerability databases. -Updaters are usually paired with a matcher to interpret if, and how, any vulnerability is related to a package. 
Administrators might want to update the vulnerability database less frequently, or not import vulnerabilities from databases that they know will not be used. - -[id="configuring-updaters"] -== Configuring updaters - -Updaters can be configured by the `updaters` key at the top of the configuration. If updaters are being run automatically within the matcher process, which is the default setting, the period for running updaters is configured under the matcher's configuration field. - -[id="updater-sets"] -=== Updater sets - -The following sets can be configured with Clair updaters: - -* `alpine` -* `aws` -* `debian` -* `enricher/cvss` -* `libvuln/driver` -* `oracle` -* `photon` -* `pyupio` -* `rhel` -* `rhel/rhcc` -* `suse` -* `ubuntu` -* `updater` - -[id="selecting-updater-sets"] -=== Selecting updater sets - -Specific sets of updaters can be selected by the `sets` list. For example: - -[source,yaml] ----- -updaters: - sets: - - rhel ----- - -If the `sets` field is not populated, it defaults to using all sets. - -[id="filtering-updater-sets"] -=== Filtering updater sets - -To reject an updater from running without disabling an entire set, the `filter` option can be used. - -In the following example, the string is interpreted as a Go `regexp` package. This rejects any updater with a name that does not match. - -[NOTE] -==== -This means that an empty string matches any string. It does not mean that it matches no strings. -==== - -[source,yaml] ----- -updaters: - filter: '^$' ----- - -[id="configuring-specific-updaters"] -=== Configuring specific updaters - -Configuration for specific updaters can be passed by putting a key underneath the `config` parameter of the `updaters` object. The name of an updater might be constructed dynamically, and users should examine logs to ensure updater names are accurate. The specific object that an updater expects should be covered in the updater's documentation. 
- -In the following example, the `rhel` updater fetches a manifest from a different location: - -[source,yaml] ----- -updaters: - config: - rhel: - url: https://example.com/mirror/oval/PULP_MANIFEST ----- - -[id="disabling-clair-updater-component-managed-db"] -=== Disabling the Clair Updater component - -In some scenarios, users might want to disable the Clair updater component. Disabling updaters is required when running {productname} in a disconnected environment. - -In the following example, Clair updaters are disabled: - -[source,yaml] ----- -matcher: - disable_updaters: true ----- \ No newline at end of file +Updaters are usually paired with a matcher to interpret if, and how, any vulnerability is related to a package. Administrators might want to update the vulnerability database less frequently, or not import vulnerabilities from databases that they know will not be used. \ No newline at end of file diff --git a/modules/clair-vulnerability-scanner-hosts.adoc b/modules/clair-vulnerability-scanner-hosts.adoc index 4857eb947..e133afbc7 100644 --- a/modules/clair-vulnerability-scanner-hosts.adoc +++ b/modules/clair-vulnerability-scanner-hosts.adoc @@ -9,14 +9,27 @@ Clair uses the following vulnerability databases to report for issues in your images: * Ubuntu Oval database -* Debian Oval database +* Debian Security Tracker * {rhel} Oval database * SUSE Oval database * Oracle Oval database * Alpine SecDB database -* VMWare Photon OS database +* VMware Photon OS database * Amazon Web Services (AWS) UpdateInfo -* Pyup.io (Python) database +* link:https://osv.dev/[Open Source Vulnerability (OSV) Database] For information about how Clair does security mapping with the different databases, see -link:https://quay.github.io/claircore/concepts/severity_mapping.html[ClairCore Severity Mapping]. \ No newline at end of file +link:https://quay.github.io/claircore/concepts/severity_mapping.html[Claircore Severity Mapping]. 
+ +[id="information-about-clair-osv"] +== Information about Open Source Vulnerability (OSV) database for Clair + +Open Source Vulnerability (OSV) is a vulnerability database and monitoring service that focuses on tracking and managing security vulnerabilities in open source software. + +OSV provides a comprehensive and up-to-date database of known security vulnerabilities in open source projects. It covers a wide range of open source software, including libraries, frameworks, and other components that are used in software development. For a full list of included ecosystems, see link:https://ossf.github.io/osv-schema/#affectedpackage-field[defined ecosystems]. + +Clair also reports vulnerability and security information for `golang`, `java`, and `ruby` ecosystems through the Open Source Vulnerability (OSV) database. + +By leveraging OSV, developers and organizations can proactively monitor and address security vulnerabilities in open source components that they use, which helps to reduce the risk of security breaches and data compromises in projects. + +For more information about OSV, see link:https://osv.dev/[the OSV website]. \ No newline at end of file diff --git a/modules/clair-vulnerability-scanner-overview.adoc b/modules/clair-vulnerability-scanner-overview.adoc index c83bae9c7..82580353f 100644 --- a/modules/clair-vulnerability-scanner-overview.adoc +++ b/modules/clair-vulnerability-scanner-overview.adoc @@ -4,18 +4,21 @@ :_content-type: CONCEPT [id="clair-vulnerability-scanner"] -= Clair for {productname} += Clair security scanner -Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. 
+ifeval::["{context}" == "quay-io"] +Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {quayio}, is automatically enabled, and is managed by the {productname} development team. + +For {quayio} users, images are automatically indexed after they are pushed to your repository. Reports are then fetched from Clair, which matches images against its CVE database to report security information. This process happens automatically on {quayio}, and manual rescans are not required. +endif::[] -//// -[NOTE] -==== -ifeval::["{productname}" == "Red Hat Quay"] -With the release of {productname} 3.4, Clair v4 (image {productrepo}/{clairimage} fully replaced Clair v2 (image quay.io/redhat/clair-jwt). See below for how to run Clair v2 in read-only mode while Clair v4 is updating. +ifeval::["{context}" == "clair"] +Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. endif::[] -ifeval::["{productname}" == "Project Quay"] -With the release of Clair v4 (image clair), the previously used Clair v2 (image clair-jwt) is no longer used. See below for how to run Clair v2 in read-only mode while Clair v4 is updating. + +ifeval::["{context}" == "quay-security"] +Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. 
It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. + +For more information about Clair security scanner, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index[Vulnerability reporting with Clair on {productname}]. endif::[] -==== -//// \ No newline at end of file + diff --git a/modules/clairv4-air-gapped.adoc b/modules/clairv4-air-gapped.adoc index fe0b14c5c..aaf377af9 100644 --- a/modules/clairv4-air-gapped.adoc +++ b/modules/clairv4-air-gapped.adoc @@ -7,4 +7,4 @@ * On a system with internet access, the vulnerability database updates is performed manually and exported to a disk. * The on-disk data is then transferred to the target system with offline media. It is then manually imported. -For more information on air-gapped Clair v4 and using `clairctl`, the command line tool, see https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-disconnected-environments[Manually updating the vulnerability databases for Clair in an air-gapped OpenShift cluster] +For more information on air-gapped Clair v4 and using `clairctl`, the command line tool, see https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-disconnected-environments[Manually updating the vulnerability databases for Clair in an air-gapped OpenShift cluster] diff --git a/modules/con_quay_ha_prereq.adoc b/modules/con_quay_ha_prereq.adoc index f907dfcb8..52c8ff578 100644 --- a/modules/con_quay_ha_prereq.adoc +++ b/modules/con_quay_ha_prereq.adoc @@ -11,7 +11,7 @@ Here are a few things you need to know before you begin the {productname} high a * Either Postgres or MySQL can be used to provide the database service. 
Postgres was chosen here as the database because it includes the features needed to support Clair security scanning. Other options include: ** Crunchy Data PostgreSQL Operator: Although not supported directly by Red Hat, -the link:https://access.crunchydata.com/documentation/postgres-operator/latest/[CrunchDB Operator] +the link:https://access.crunchydata.com/documentation/postgres-operator/latest/[Postgres Operator] is available from link:https://www.crunchydata.com/[Crunchy Data] for use with {productname}. If you take this route, you should have a support contract with Crunchy Data and work directly with them for usage guidance or issues relating to the operator and their database. @@ -46,18 +46,13 @@ Each system should have the following attributes: //* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.5/x86_64/product-software[Downloads page] and follow instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide] to install RHEL on each system. //** **Valid Red Hat Subscription**: Obtain Red Hat Enterprise Linux server subscriptions and apply one to each system. -* **Red Hat Enterprise Linux (RHEL)** 8: Obtain the latest Red Hat Enterprise Linux 8 server media from the link:https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.3/x86_64/product-software[Downloads page] and follow the installation instructions available in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/[Product Documentation for Red Hat Enterprise Linux 8]. 
+* **Red Hat Enterprise Linux (RHEL)** 9: Obtain the latest Red Hat Enterprise Linux 9 server media from the link:https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.0/x86_64/product-software[Downloads page] and follow the installation instructions available in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/[Product Documentation for Red Hat Enterprise Linux 9]. ** **Valid Red Hat Subscription**: Configure a valid Red Hat Enterprise Linux 8 server subscription. ** **CPUs**: Two or more virtual CPUs ** **RAM**: 4GB for each A and B system; 8GB for each C system ** **Disk space**: About 20GB of disk space for each A and B system (10GB for the operating system and 10GB for docker storage). At least 30GB of disk space for C systems (or more depending on required container storage). -[NOTE] -==== -Red Hat Enterprise Linux (RHEL) 8 is strongly recommended for highly available, production quality deployments of {productname} 3.7. RHEL 7 has not been tested with {productname} 3.7, and will be deprecated in a future release. -==== - [[using-podman]] == Using podman @@ -65,7 +60,7 @@ This document uses podman for creating and deploying containers. If you do not h [NOTE] ==== -Podman is strongly recommended for highly available, production quality deployments of {productname} 3.7. Docker has not been tested with {productname} 3.7, and will be deprecated in a future release. +Podman is strongly recommended for highly available, production quality deployments of {productname}. Docker has not been tested with {productname} {producty}, and will be deprecated in a future release. 
==== diff --git a/modules/con_quay_intro.adoc b/modules/con_quay_intro.adoc index b1bdf9519..e52645213 100644 --- a/modules/con_quay_intro.adoc +++ b/modules/con_quay_intro.adoc @@ -1,12 +1,12 @@ :_content-type: CONCEPT [id="poc-overview"] -= Overview += {productname} features -{productname} includes the following features: +{productname} is regularly released with new features and software updates. The following features are available for {productname} deployments, however the list is not exhaustive: * High availability * Geo-replication -* Repository mirroring +* Repository mirroring * Docker v2, schema 2 (multi-arch) support * Continuous integration * Security scanning with Clair @@ -14,11 +14,16 @@ * Zero downtime garbage collection * 24/7 support +Users should check the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/index#doc-wrapper[{productname} Release Notes] for the latest feature information. + +[id="poc-support"] += {productname} support + {productname} provides support for the following: * Multiple authentication and access methods * Multiple storage backends -* Custom certificates for Quay, Clair, and storage backends +* Custom certificates for `Quay`, `Clair`, and storage backend containers * Application registries * Different container image types @@ -27,10 +32,12 @@ {productname} includes several core components, both internal and external. +For a fuller architectural breakdown, see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_architecture/index[{productname} architecture] guide. + [id="poc-internal-components"] === Internal components -{productname} includes the following internal components: +{productname} includes the following internal components: * **Quay (container registry)**. Runs the `Quay` container as a service, consisting of several components in the pod. * **Clair**. 
Scans container images for vulnerabilities and suggests fixes. @@ -38,11 +45,11 @@ [id="poc-external-components"] === External components -{productname} includes the following external components: +{productname} includes the following external components: * **Database**. Used by {productname} as its primary metadata storage. Note that this is not for image storage. -* **Redis (key-value store)**. Stores live builder logs and the {productname} tutorial. -* **Cloud storage**. For supported deployments, one of the following storage types must be used: +* **Redis (key-value store)**. Stores live builder logs and the {productname} tutorial. Also includes the locking mechanism that is required for garbage collection. +* **Cloud storage**. For supported deployments, one of the following storage types must be used: ** **Public cloud storage**. In public cloud environments, you should use the cloud provider's object storage, such as Amazon Web Services's Amazon S3 or Google Cloud's Google Cloud Storage. ** **Private cloud storage**. In private clouds, an S3 or Swift compliant Object Store is needed, such as Ceph RADOS, or OpenStack Swift. diff --git a/modules/con_quay_single_prereq.adoc b/modules/con_quay_single_prereq.adoc index bf8279148..aa9fe73d2 100644 --- a/modules/con_quay_single_prereq.adoc +++ b/modules/con_quay_single_prereq.adoc @@ -4,9 +4,9 @@ ifeval::["{productname}" == "Red Hat Quay"] //* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux 7 server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.9/x86_64/product-software[Downloads page] and follow the installation instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide]. 
-* Red Hat Enterprise Linux (RHEL) 8 -** To obtain the latest version of {rhel} 8, see link:https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.3/x86_64/product-software[Downlad Red Hat Enterprise Linux]. -** For installation instructions, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/[Product Documentation for Red Hat Enterprise Linux 8]. +* Red Hat Enterprise Linux (RHEL) 9 +** To obtain the latest version of {rhel} 9, see link:https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.0/x86_64/product-software[Download Red Hat Enterprise Linux]. +** For installation instructions, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/[Product Documentation for Red Hat Enterprise Linux 9]. * An active subscription to Red Hat endif::[] * Two or more virtual CPUs @@ -21,19 +21,38 @@ endif::[] CEPH or other local storage might require more memory. ==== + -More information on sizing can be found at link:https://access.redhat.com/articles/5177961[Quay 3.x Sizing Guidlines]. - -[NOTE] -==== -Red Hat Enterprise Linux (RHEL) 8 is recommended for highly available, production quality deployments of {productname} {producty}. RHEL 7 has not been tested with {productname} {producty}, and will be deprecated in a future release. -==== +More information on sizing can be found at link:https://access.redhat.com/articles/5177961[Quay 3.x Sizing Guidelines]. +* The following architectures are supported for {productname}: +** amd64/x86_64 +** s390x +** ppc64le [id="poc-using-podman"] -== Using Podman +== Installing Podman + +This document uses Podman for creating and deploying containers. -This document uses Podman for creating and deploying containers. 
For more information on Podman and related technologies, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index[Building, running, and managing Linux containers on Red Hat Enterprise Linux 8]. +For more information on Podman and related technologies, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/building_running_and_managing_containers/index[Building, running, and managing Linux containers on Red Hat Enterprise Linux 9]. [IMPORTANT] ==== If you do not have Podman installed on your system, the use of equivalent Docker commands might be possible, however this is not recommended. Docker has not been tested with {productname} {producty}, and will be deprecated in a future release. Podman is recommended for highly available, production quality deployments of {productname} {producty}. -==== \ No newline at end of file +==== + +Use the following procedure to install Podman. + +.Procedure + +* Enter the following command to install Podman: ++ +[source,terminal] +---- +$ sudo yum install -y podman +---- + +* Alternatively, you can install the `container-tools` module, which pulls in the full set of container software packages: ++ +[source,terminal] +---- +$ sudo yum module install -y container-tools +---- \ No newline at end of file diff --git a/modules/con_schema.adoc b/modules/con_schema.adoc index ba0ab9b91..e2058a847 100644 --- a/modules/con_schema.adoc +++ b/modules/con_schema.adoc @@ -1,543 +1,5 @@ -[[quay-schema]] +:_content-type: CONCEPT +[id="quay-schema"] = Schema for {productname} configuration -Most {productname} configuration information is stored in the `config.yaml` file that is created -using the browser-based config tool when {productname} is first deployed. 
- - -// TODO 36 Add link to standalone config guide -// https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/ - -The configuration options are described in the {productname} Configuration Guide. - - - -//// -Most {productname} configuration information is stored in the `config.yaml` file that is created -using the browser-based config tool when {productname} is first deployed. This chapter describes -the schema of those settings that are available to use in the`config.yaml` file. - - -The following fields required (all other are optional): - ----- -AUTHENTICATION_TYPE -BUILDLOGS_REDIS -DATABASE_SECRET_KEY -DB_URI -DEFAULT_TAG_EXPIRATION -DISTRIBUTED_STORAGE_CONFIG -DISTRIBUTED_STORAGE_PREFERENCE -PREFERRED_URL_SCHEME -SECRET_KEY -SERVER_HOSTNAME -TAG_EXPIRATION_OPTIONS -USER_EVENTS_REDIS ----- - -* **ACTION_LOG_ARCHIVE_LOCATION** [string]: If action log archiving is enabled, the storage engine in which to place the archived data. -** **Example**: `s3_us_east` -* **ACTION_LOG_ARCHIVE_PATH** [string]: If action log archiving is enabled, the path in storage in which to place the archived data. -** **Example**: `archives/actionlogs` -* **ACTION_LOG_ROTATION_THRESHOLD** [string]: If action log archiving is enabled, the time interval after which to rotate logs. -** **Example**: `30d` -* **ALLOW_PULLS_WITHOUT_STRICT_LOGGING** [boolean]: If true, pulls in which the pull audit log entry cannot be written will still succeed. Useful if the database can fallback into a read-only state and it is desired for pulls to continue during that time. Defaults to False. -** **Example**: `True` -* **APP_SPECIFIC_TOKEN_EXPIRATION** [string, `null`]: The expiration for external app tokens. Defaults to None. -** **Pattern**: `^[0-9]+(w|m|d|h|s)$` -* **AUTHENTICATION_TYPE** [string] required: The authentication engine to use for credential authentication. -** **enum**: Database, LDAP, JWT, Keystone, OIDC. 
-** **Example**: `Database` -* **AVATAR_KIND** [string]: The types of avatars to display, either generated inline (local) or Gravatar (gravatar) -** **enum**: local, gravatar -* **BITBUCKET_TRIGGER_CONFIG** ['object', 'null']: Configuration for using BitBucket for build triggers. -** **consumer_key** [string] required: The registered consumer key(client ID) for this {productname} instance. -*** **Example**: `0e8dbe15c4c7630b6780` -* **BLACKLISTED_EMAIL_DOMAINS** [array]: The array of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true. -** **Example**: `"example.com", "example.org"` -* **BLACKLIST_V2_SPEC** [string]: The Docker CLI versions to which {productname} will respond that V2 is *unsupported*. Defaults to `<1.6.0`. -** **Reference**: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec -** **Example**: `<1.8.0` -* **BRANDING** [object]: Custom branding for logos and URLs in the {productname} UI. -** **Required**: logo -** **properties**: -*** **logo** [string]: Main logo image URL. -**** **Example**: `/static/img/quay-horizontal-color.svg` -*** **footer_img** [string]: Logo for UI footer. -**** **Example**: `/static/img/RedHat.svg` -*** **footer_url** [string]: Link for footer image. -**** **Example**: `https://redhat.com` -* **BROWSER_API_CALLS_XHR_ONLY** [boolean]: If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True. -** **Example**: False -* **BUILDLOGS_REDIS** [object] required: Connection information for Redis for build logs caching. -** **HOST** [string] required: The hostname at which Redis is accessible. -*** **Example**: `my.redis.cluster` -** **PASSWORD** [string]: The password to connect to the Redis instance. -*** **Example**: `mypassword` -** **PORT** [number]: The port at which Redis is accessible. -*** **Example**: `1234` -* **CONTACT_INFO** [array]: If specified, contact information to display on the contact page. 
If only a single piece of contact information is specified, the contact footer will link directly. -** **Min Items**: 1 -** **Unique Items**: True -*** **array item 0** [string]: Adds a link to send an e-mail -*** **Pattern**: ``^mailto:(.)+$`` -*** **Example**: `mailto:support@quay.io` -** **array item 1** [string]: Adds a link to visit an IRC chat room -*** **Pattern**: ``^irc://(.)+$`` -*** **Example**: `irc://chat.freenode.net:6665/quay` -** **array item 2** [string]: Adds a link to call a phone number -*** **Pattern**: ``^tel:(.)+$`` -*** **Example**: `tel:+1-888-930-3475` -** **array item 3** [string]: Adds a link to a defined URL -*** **Pattern**: ``^http(s)?://(.)+$`` -*** **Example**: `https://twitter.com/quayio` -* **DB_CONNECTION_ARGS** [object]: If specified, connection arguments for the database such as timeouts and SSL. -** **threadlocals** [boolean] required: Whether to use thread-local connections. Should *ALWAYS* be `true`. -** **autorollback** [boolean] required: Whether to use auto-rollback connections. Should *ALWAYS* be `true`. -** **ssl** [object]: SSL connection configuration -*** **ca** [string] required: Absolute container path to the CA certificate to use for SSL connections. -*** **Example**: `conf/stack/ssl-ca-cert.pem` -* **DATABASE_SECRET_KEY** [string] required: Key used to encrypt sensitive fields within the database. It is imperative that once set, this value is never changed. The consequence of changing this is invalidating all reliant fields (repository mirror username and password configurations, for example). -** **Example**: `40157269433064266822674401740626984898972632465622168464725100311621640999470` -* **DB_URI** [string] required: The URI at which to access the database, including any credentials. 
-** **Reference**: https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495 -** **Example**: `mysql+pymysql://username:password@dns.of.database/quay` -* **DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT** [number, `null`]: If not None, the default maximum number of builds that can be queued in a namespace. -** **Example**: `20` -* **DEFAULT_TAG_EXPIRATION** [string] required: The default, configurable tag expiration time for time machine. Defaults to `2w`. -** **Pattern**: ``^[0-9]+(w|m|d|h|s)$`` -* **DIRECT_OAUTH_CLIENTID_WHITELIST** [array]: A list of client IDs of *{productname}-managed* applications that are allowed to perform direct OAuth approval without user approval. -** **Min Items**: None -** **Unique Items**: True -** **Reference**: https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html -*** **array item** [string] -* **DISTRIBUTED_STORAGE_CONFIG** [object] required: Configuration for storage engine(s) to use in {productname}. Each key represents an unique identifier for a storage engine. The value consists of a tuple of (key, value) forming an object describing the storage engine parameters. 
- -** ** OCS / Noobaa**: -+ -``` -rhocsStorage: - - RHOCSStorage - - access_key: access_key_here - secret_key: secret_key_here - bucket_name: quay-datastore-9b2108a3-29f5-43f2-a9d5-2872174f9a56 - hostname: s3.openshift-storage.svc.cluster.local - is_secure: 'true' - port: '443' - storage_path: /datastorage/registry -``` -** **Ceph / RadosGW Storage / Hitachi HCP**: -+ -``` -radosGWStorage: - - RadosGWStorage - - access_key: access_key_here - secret_key: secret_key_here - bucket_name: bucket_name_here - hostname: hostname_here - is_secure: 'true' - port: '443' - storage_path: /datastorage/registry -``` - -** **AWS S3 Storage**: -+ -``` -s3Storage: - - S3Storage - - host: s3.ap-southeast-2.amazonaws.com - s3_access_key: s3_access_key_here - s3_secret_key: s3_secret_key_here - s3_bucket: s3_bucket_here - storage_path: /datastorage/registry -``` - -** **Azure Storage**: -+ -``` -azureStorage: - - AzureStorage - - azure_account_name: azure_account_name_here - azure_account_key: azure_account_key_here - azure_container: azure_container_here - sas_token: some/path/ - storage_path: /datastorage/registry -``` - -** **Google Cloud Storage**: -+ -``` -googleCloudStorage: - - GoogleCloudStorage - - access_key: access_key_here - secret_key: secret_key_here - bucket_name: bucket_name_here - storage_path: /datastorage/registry -``` - -** **Swift Storage**: -+ -``` -swiftStorage: - - SwiftStorage - - swift_user: swift_user_here - swift_password: swift_password_here - swift_container: swift_container_here - auth_url: https://example.org/swift/v1/quay - auth_version: 1 - ca_cert_path: /conf/stack/swift.cert" - storage_path: /datastorage/registry -``` -* **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** [string]: The quota size to apply to all organizations and users. -* **DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS** [array]: The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose images should be fully replicated, by default, to all other storage engines. 
-** **Min Items**: None -** **Example**: `s3_us_east, s3_us_west` -*** **array item** [string] -* **DISTRIBUTED_STORAGE_PREFERENCE** [array] required: The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to use. A preferred engine means it is first checked for pulling and images are pushed to it. -** **Min Items**: None -*** **Example**: `[u's3_us_east', u's3_us_west']` -*** **array item** [string] -** **preferred_url_scheme** [string] required: The URL scheme to use when hitting {productname}. If {productname} is behind SSL *at all*, this *must* be `https`. -*** **enum**: `http, https` -*** **Example**: `https` -* **DOCUMENTATION_ROOT** [string]: Root URL for documentation links. -* **ENABLE_HEALTH_DEBUG_SECRET** [string, `null`]: If specified, a secret that can be given to health endpoints to see full debug info when not authenticated as a superuser. -** **Example**: `somesecrethere` -* **EXPIRED_APP_SPECIFIC_TOKEN_GC** [string, `null`]: Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d. -** **pattern**: `^[0-9]+(w|m|d|h|s)$` -* **EXTERNAL_TLS_TERMINATION** [boolean]: If TLS is supported, but terminated at a layer before {productname}, must be true. -** **Example**: `True` -* **FEATURE_ACI_CONVERSION** [boolean]: Whether to enable conversion to ACIs. Defaults to False. -** **Example**: `False` -* **FEATURE_ACTION_LOG_ROTATION** [boolean]: Whether or not to rotate old action logs to storage. Defaults to False. -** **Example**: `False` -* **FEATURE_ADVERTISE_V2** [boolean]: Whether the v2/ endpoint is visible. Defaults to True. -** **Example**: `True` -* **FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL** [boolean]: Whether to allow retrieval of aggregated log counts. Defaults to True. -** **Example**: `True` -* **FEATURE_ANONYMOUS_ACCESS** [boolean]: Whether to allow anonymous users to browse and pull public repositories. Defaults to True. 
-** **Example**: `True` -* **FEATURE_APP_REGISTRY** [boolean]: Whether to enable support for App repositories. Defaults to False. -** **Example**: `False` -* **FEATURE_APP_SPECIFIC_TOKENS** [boolean]: If enabled, users can create tokens for use by the Docker CLI. Defaults to True. -** **Example**: False -* **FEATURE_BITBUCKET_BUILD** [boolean]: Whether to support Bitbucket build triggers. Defaults to False. -** **Example**: `False` -* **FEATURE_BLACKLISTED_EMAIL** -* **FEATURE_BUILD_SUPPORT** [boolean]: Whether to support Dockerfile build. Defaults to True. -** **Example**: `True` -* **FEATURE_CHANGE_TAG_EXPIRATION** [boolean]: Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True. -** **Example**: `False` -* **FEATURE_DIRECT_LOGIN** [boolean]: Whether users can directly login to the UI. Defaults to True. -** **Example**: `True` -* **FEATURE_GARBAGE_COLLECTION** [boolean]: Whether garbage collection of repositories is enabled. Defaults to True. -** **Example**: `True` -* **FEATURE_GITHUB_BUILD** [boolean]: Whether to support GitHub build triggers. Defaults to False. -** **Example**: `False` -* **FEATURE_GITHUB_LOGIN** [boolean]: Whether GitHub login is supported. Defaults to False. -** **Example**: `False` -* **FEATURE_GITLAB_BUILD**[boolean]: Whether to support GitLab build triggers. Defaults to False. -** **Example**: `False` -* **FEATURE_GOOGLE_LOGIN** [boolean]: Whether Google login is supported. Defaults to False. -** **Example**: `False` -* **FEATURE_INVITE_ONLY_USER_CREATION** [boolean]: Whether users being created must be invited by another user. Defaults to False. -** **Example**: `False` -* **FEATURE_LIBRARY_SUPPORT** [boolean]: Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True. -** **Example**: `True` -* **FEATURE_LOG_EXPORT** [boolean]: Whether to allow exporting of action logs. Defaults to True. 
-** **Example**: `True` -* **FEATURE_MAILING** [boolean]: Whether emails are enabled. Defaults to True. -** **Example**: `True` -* **FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP** [boolean]: If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False. -** **Example**: `True` -* **FEATURE_PARTIAL_USER_AUTOCOMPLETE** [boolean]: If set to true, autocompletion will apply to partial usernames. Defaults to True. -** **Example**: `True` -* **FEATURE_PERMANENT_SESSIONS** [boolean]: Whether sessions are permanent. Defaults to True. -** **Example**: `True` -* **FEATURE_PROXY_CACHE** [boolean]: Whether to enable proxy caching for {productname}. -** **Example**: `True` -* **FEATURE_PROXY_STORAGE** [boolean]: Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False. -** **Example**: `False` -* **FEATURE_PUBLIC_CATALOG** [boolean]: If set to true, the `_catalog` endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False. -** **Example**: `False` -* **FEATURE_QUOTA_MANAGEMENT** [boolean]: If set to true, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. -** **Example**: `True` -* **FEATURE_RATE_LIMITS** [boolean]: Whether to enable rate limits on API and registry endpoints. Defaults to False. -** **Example**: `False` -* **FEATURE_READER_BUILD_LOGS** [boolean]: If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False. -** **Example**: False -* **FEATURE_READONLY_APP_REGISTRY** [boolean]: Whether to App repositories are read-only. Defaults to False. -** **Example**: `True` -* **FEATURE_RECAPTCHA** [boolean]: Whether Recaptcha is necessary for user login and recovery. Defaults to False. 
-** **Example**: `False` -** **Reference**: https://www.google.com/recaptcha/intro/ -* **FEATURE_REPO_MIRROR** [boolean]: If set to true, enables repository mirroring. Defaults to False. -** **Example**: `False` -* **FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH** [boolean]: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False. -** **Example**: `False` -* **FEATURE_REQUIRE_TEAM_INVITE** [boolean]: Whether to require invitations when adding a user to a team. Defaults to True. -** **Example**: `True` -* **FEATURE_RESTRICTED_V1_PUSH** [boolean]: If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push. Defaults to True. -** **Example**: `True` -* **FEATURE_SECURITY_NOTIFICATIONS** [boolean]: If the security scanner is enabled, whether to turn on/off security notifications. Defaults to False. -** **Example**: `False` -* **FEATURE_SECURITY_SCANNER** [boolean]: Whether to turn on/off the security scanner. Defaults to False. -** **Reference**: https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/#clair-initial-setup -** **Example**: `False` -* **FEATURE_STORAGE_REPLICATION** [boolean]: Whether to automatically replicate between storage engines. Defaults to False. -** **Example**: `False` -* **FEATURE_SUPER_USERS** [boolean]: Whether superusers are supported. Defaults to True. -** **Example**: `True` -* **FEATURE_TEAM_SYNCING** [boolean]: Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone). -** **Example**: `True` -* **FEATURE_USER_CREATION** [boolean]: Whether users can be created (by non-superusers). Defaults to True. -** **Example**: `True` -* **FEATURE_USER_LAST_ACCESSED** [boolean]: Whether to record the last time a user was accessed. Defaults to True. 
-** **Example**: `True` -* **FEATURE_USER_LOG_ACCESS** [boolean]: If set to true, users will have access to audit logs for their namespace. Defaults to False. -** **Example**: `True` -* **FEATURE_USER_METADATA** [boolean]: Whether to collect and support user metadata. Defaults to False. -** **Example**: `False` -* **FEATURE_USERNAME_CONFIRMATION** [boolean]: If set to true, users can confirm their generated usernames. Defaults to True. -** **Example**: `False` -* **FEATURE_USER_RENAME** [boolean]: If set to true, users can rename their own namespace. Defaults to False. -** **Example**: `True` -* **FRESH_LOGIN_TIMEOUT** [string]: The time after which a fresh login requires users to reenter their password -** **Example**: `5m` -* **GITHUB_LOGIN_CONFIG** [object, 'null']: Configuration for using GitHub (Enterprise) as an external login provider. -** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-auth.html -** **allowed_organizations** [array]: The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option. -*** **Min Items**: None -*** **Unique Items**: True -**** **array item** [string] -** **API_ENDPOINT** [string]: The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com. -*** **Example**: `https://api.github.com/` -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance; cannot be shared with GITHUB_TRIGGER_CONFIG. -*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. -*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `e4a58ddd3d7408b7aec109e85564a0d153d3e846` -** **GITHUB_ENDPOINT** [string] required: The endpoint of the GitHub (Enterprise) being hit. 
-*** **Example**: `https://github.com/` -** **ORG_RESTRICT** [boolean]: If true, only users within the organization whitelist can login using this provider. -** **Example**: `True` -* **GITHUB_TRIGGER_CONFIG** [object, `null`]: Configuration for using GitHub (Enterprise) for build triggers. -** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-build.html -** **API_ENDPOINT** [string]: The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com. -*** **Example**: `https://api.github.com/` -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance; cannot be shared with GITHUB_LOGIN_CONFIG. -*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. -*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `e4a58ddd3d7408b7aec109e85564a0d153d3e846` -** **GITHUB_ENDPOINT** [string] required: The endpoint of the GitHub (Enterprise) being hit. -*** **Example**: `https://github.com/` -* **GITLAB_TRIGGER_CONFIG** [object]: Configuration for using Gitlab (Enterprise) for external authentication. -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance. -*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. -*** **Example**: `e4a58ddd3d7408b7aec109e85564a0d153d3e846` -*** **gitlab_endpoint** [string] required: The endpoint at which Gitlab(Enterprise) is running. -**** **Example**: `https://gitlab.com` -* **GOOGLE_LOGIN_CONFIG** [object, `null`]: Configuration for using Google for external authentication -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance. 
-*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. -*** **Example**: e4a58ddd3d7408b7aec109e85564a0d153d3e846 -* **GPG2_PRIVATE_KEY_FILENAME** [string]: The filename of the private key used to decrypte ACIs. -** **Example**: `/path/to/file` -* **GPG2_PRIVATE_KEY_NAME** [string]: The name of the private key used to sign ACIs. -** **Example**: `gpg2key` -* **GPG2_PUBLIC_KEY_FILENAME** [string]: The filename of the public key used to encrypt ACIs. -** **Example**: `/path/to/file` -* **HEALTH_CHECKER** [string]: The configured health check. -** **Example**: `('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'})` -* **JWT_AUTH_ISSUER** [string]: The endpoint for JWT users. -** **Example**: `http://192.168.99.101:6060` -** **Pattern**: `^http(s)?://(.)+$` -* **JWT_GETUSER_ENDPOINT** [string]: The endpoint for JWT users. -** **Example**: `http://192.168.99.101:6060` -** **Pattern**: `^http(s)?://(.)+$` -* **JWT_QUERY_ENDPOINT** [string]: The endpoint for JWT queries. -** **Example**: `http://192.168.99.101:6060` -** **Pattern**: `^http(s)?://(.)+$` -* **JWT_VERIFY_ENDPOINT** [string]: The endpoint for JWT verification. -** **Example**: `http://192.168.99.101:6060` -** **Pattern**: `^http(s)?://(.)+$` -* **LDAP_ADMIN_DN** [string]: The admin DN for LDAP authentication. -* **LDAP_ADMIN_PASSWD** [string]: The admin password for LDAP authentication. -* **LDAP_ALLOW_INSECURE_FALLBACK** [boolean]: Whether or not to allow SSL insecure fallback for LDAP authentication. -* **LDAP_BASE_DN** [string]: The base DN for LDAP authentication. -* **LDAP_EMAIL_ATTR** [string]: The email attribute for LDAP authentication. -* **LDAP_UID_ATTR** [string]: The uid attribute for LDAP authentication. -* **LDAP_URI** [string]: The LDAP URI. -* **LDAP_USER_FILTER** [string]: The user filter for LDAP authentication. -* **LDAP_USER_RDN** [array]: The user RDN for LDAP authentication. 
-* **LOGS_MODEL** [string]: Logs model for action logs. -** **enum**: database, transition_reads_both_writes_es, elasticsearch -** **Example**: `database` -* **LOGS_MODEL_CONFIG** [object]: Logs model config for action logs -** **elasticsearch_config** [object]: Elasticsearch cluster configuration -*** **access_key** [string]: Elasticsearch user (or IAM key for AWS ES) -**** **Example**: `some_string` -*** **host** [string]: Elasticsearch cluster endpoint -**** **Example**: `host.elasticsearch.example` -*** **index_prefix** [string]: Elasticsearch's index prefix -**** **Example**: `logentry_` -*** **index_settings** [object]: Elasticsearch's index settings -*** **use_ssl** [boolean]: Use ssl for Elasticsearch. Defaults to True -**** **Example**: `True` -*** **secret_key** [string]: Elasticsearch password (or IAM secret for AWS ES) -**** **Example**: `some_secret_string` -*** **aws_region** [string]: Amazon web service region -**** **Example**: `us-east-1` -*** **port** [number]: Elasticsearch cluster endpoint port -**** **Example**: `1234` -** **kinesis_stream_config** [object]: AWS Kinesis Stream configuration -*** **aws_secret_key** [string]: AWS secret key -**** **Example**: `some_secret_key` -*** **stream_name** [string]: Kinesis stream to send action logs to -**** **Example**: `logentry-kinesis-stream` -*** **aws_access_key** [string]: AWS access key -**** **Example**: `some_access_key` -*** **retries** [number]: Max number of attempts made on a single request -**** **Example**: `5` -*** **read_timeout** [number]: Number of seconds before timeout when reading from a connection -**** **Example**: `5` -*** **max_pool_connections** [number]: The maximum number of connections to keep in a connection pool -**** **Example**: `10` -*** **aws_region** [string]: AWS region -**** **Example**: `us-east-1` -*** **connect_timeout** [number]: Number of seconds before timeout when attempting to make a connection -**** **Example**: `5` -** **producer** [string]: Logs producer 
if logging to Elasticsearch -*** **enum**: kafka, elasticsearch, kinesis_stream -*** **Example**: `kafka` -** **kafka_config** [object]: Kafka cluster configuration -*** **topic** [string]: Kafka topic to publish log entries to -**** **Example**: `logentry` -*** **bootstrap_servers** [array]: List of Kafka brokers to bootstrap the client from -*** **max_block_seconds** [number]: Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable -**** **Example**: `10` -* **LOG_ARCHIVE_LOCATION** [string]: If builds are enabled, the storage engine in which to place the archived build logs. -** **Example**: `s3_us_east` -* **LOG_ARCHIVE_PATH** [string]: If builds are enabled, the path in storage in which to place the archived build logs. -** **Example**: `archives/buildlogs` -* **LOGS_MODEL** [string]: Logs model for action logs. -* **enum**: `database`, `transition_reads_both_writes_es`, `elasticsearch` -* **Example**: `database` -* **MAIL_DEFAULT_SENDER** [string, `null`]: If specified, the e-mail address used as the `from` when {productname} sends e-mails. If none, defaults to `support@quay.io`. -** **Example**: `support@myco.com` -* **MAIL_PASSWORD** [string, `null`]: The SMTP password to use when sending e-mails. -** **Example**: `mypassword` -* **MAIL_PORT** [number]: The SMTP port to use. If not specified, defaults to 587. -** **Example**: `588` -* **MAIL_SERVER** [string]: The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true. -** **Example**: `smtp.somedomain.com` -* **MAIL_USERNAME** [string, 'null']: The SMTP username to use when sending e-mails. -** **Example**: `myuser` -* **MAIL_USE_TLS** [boolean]: If specified, whether to use TLS for sending e-mails. -** **Example**: `True` -* **MAXIMUM_LAYER_SIZE** [string]: Maximum allowed size of an image layer. Defaults to 20G. 
-** **Pattern**: ``^[0-9]+(G|M)$`` -** **Example**: `100G` -* **PREFERRED_URL_SCHEME** [string]: The URL scheme to use when hitting -{productname}. If {productname} is behind SSL *at all*, this *must* be `https` -** **enum**: `http` or `https` -** **Example**: `https` -* **PROMETHEUS_NAMESPACE** [string]: The prefix applied to all exposed Prometheus metrics. Defaults to `quay`. -** **Example**: `myregistry` -* **PUBLIC_NAMESPACES** [array]: If a namespace is defined in the public namespace list, then it will appear on *all* user's repository list pages, regardless of whether that user is a member of the namespace. Typically, this is used by an enterprise customer in configuring a set of "well-known" namespaces. -** **Min Items**: None -** **Unique Items**: True -*** **array item** [string] -* **RECAPTCHA_SECRET_KEY** [string]: If recaptcha is enabled, the secret key for the Recaptcha service. -* **RECAPTCHA_SITE_KEY** [string]: If recaptcha is enabled, the site key for the Recaptcha service. -* **REGISTRY_STATE** [string]: The state of the registry. -** **enum**: `normal` or `read-only` -** **Example**: `read-only` -* **REGISTRY_TITLE** [string]: If specified, the long-form title for the registry. Defaults to `Quay Enterprise`. -** **Example**: `Corp Container Service` -* **REGISTRY_TITLE_SHORT** [string]: If specified, the short-form title for the registry. Defaults to `Quay Enterprise`. -** **Example**: `CCS` -* **REPO_MIRROR_INTERVAL** [number]: The number of seconds between checking for repository mirror candidates. Defaults to 30. -** **Example**: `30` -* **REPO_MIRROR_SERVER_HOSTNAME** [string]: Replaces the SERVER_HOSTNAME as the destination for mirroring. Defaults to unset. -** **Example**: `openshift-quay-service` -* **REPO_MIRROR_TLS_VERIFY** [boolean]: Require HTTPS and verify certificates of Quay registry during mirror. Defaults to True. 
-** **Example**: `True` -* **SEARCH_MAX_RESULT_PAGE_COUNT** [number]: Maximum number of pages the user can paginate in search before they are limited. Defaults to 10. -** **Example**: `10` -* **SEARCH_RESULTS_PER_PAGE** [number]: Number of results returned per page by search page. Defaults to 10. -** **Example**: `10` -* **SECRET_KEY** [string] required: Key used to encrypt sensitive fields within the database and a run time. It is imperative that once set, this value is never changed. The consequence of changing this is invalidating all reliant fields (encrypted password credentials, for example). -** **Example**: `40157269433064266822674401740626984898972632465622168464725100311621640999470` -* **SECURITY_SCANNER_ENDPOINT** [string]: The endpoint for the security scanner. -** **Pattern**: ``^http(s)?://(.)+$`` -** **Example**: `http://192.168.99.101:6060` -* **SECURITY_SCANNER_INDEXING_INTERVAL** [number]: The number of seconds between indexing intervals in the security scanner. Defaults to 30. -** **Example**: `30` -* **SECURITY_SCANNER_NOTIFICATIONS** [boolean]: Whether or not to the security scanner notification feature -** **Example**: `false` -* **SECURITY_SCANNER_V4_ENDPOINT** [string]: The endpoint for the V4 security scanner. -** **Pattern**: ``^http(s)?://(.)+$`` -** **Example**: `http://192.168.99.101:6060` -* **SECURITY_SCANNER_V4_PSK** [string]: The generated pre-shared key (PSK) for Clair. -* **SERVER_HOSTNAME** [string] required: The URL at which {productname} is accessible, without the scheme. -** **Example**: `quay.io` -* **SESSION_COOKIE_SECURE** [boolean]: Whether the `secure` property should be set on session cookies. Defaults to False. Recommended to be True for all installations using SSL. -** **Example**: True -** **Reference**: https://en.wikipedia.org/wiki/Secure_cookies -* **SSL_CIPHERS** [array]: If specified, the nginx-defined list of SSL ciphers to enabled and disabled. 
-** **Example**: `CAMELLIA`, `!3DES` -* **SSL_PROTOCOLS** [array]: If specified, nginx is configured to enabled a list -of SSL protocols defined in the list. -Removing an SSL protocol from the list disables the protocol during {productname} startup. -** **SSL_PROTOCOLS**: ['TLSv1','TLSv1.1','TLSv1.2'] -* **SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD** [number]: If not None, the number of successive failures that -can occur before a build trigger is automatically disabled. Defaults to 100. -** **Example**: `50` -* **SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD** [number]: If not None, the number of successive internal errors that -can occur before a build trigger is automatically disabled. Defaults to 5. -* **SUPER_USERS** [array]: {productname} usernames of those users to be granted superuser privileges. -** **Min Items**: None -** **Unique Items**: True -*** **array item** [string] -* **TAG_EXPIRATION_OPTIONS** [array] required: The options that users can select for expiration of tags in their namespace (if enabled). -** **Min Items**: None -** **array item** [string] -** **Pattern**: ``^[0-9]+(w|m|d|h|s)$`` -* **TEAM_RESYNC_STALE_TIME** [string]: If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m). -** **Pattern**: ``^[0-9]+(w|m|d|h|s)$`` -** **Example**: `2h` -* **USERFILES_LOCATION** [string]: ID of the storage engine in which to place user-uploaded files -** **Example**: `s3_us_east` -* **USERFILES_PATH** [string]: Path under storage in which to place user-uploaded files -** **Example**: `userfiles` -* **USER_EVENTS_REDIS** [object] required: Connection information for Redis for user event handling. -** **HOST** [string] required: The hostname at which Redis is accessible. -*** **Example**: `my.redis.cluster` -** **PASSWORD** [string]: The password to connect to the Redis instance. -*** **Example**: `mypassword` -** **PORT** [number]: The port at which Redis is accessible. 
-*** **Example**: `1234` -** **CONSUMER_SECRET** [string] required: The registered consumer secret(client secret) for this {productname} instance -*** **Example**: e4a58ddd3d7408b7aec109e85564a0d153d3e846 -* **USERFILES_LOCATION** [string]: ID of the storage engine in which to place user-uploaded files. -** **Example**: `s3_us_east` -* **USERFILES_PATH** [string]: Path under storage in which to place user-uploaded files. -** **Example**: `userfiles` -* **USER_RECOVERY_TOKEN_LIFETIME** [string]: The length of time a token for recovering a user accounts is valid. Defaults to 30m. -** **Example**: `10m` -** **Pattern**: `^[0-9]+(w|m|d|h|s)$` -* **V1_PUSH_WHITELIST** [array]: The array of namespace names that support V1 push if -FEATURE_RESTRICTED_V1_PUSH is set to true. -** **Example**: `some`, `namespaces` -* **V2_PAGINATION_SIZE** [number]: The number of results returned per page in V2 registry APIs. -** **Example**: `100` -* **WEBHOOK_HOSTNAME_BLACKLIST** [array]: The set of hostnames to disallow from webhooks when validating, beyond localhost. -** **Example**: `someexternaldomain.com` - -//// +Most {productname} configuration information is stored in the `config.yaml` file. All configuration options are described in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#doc-wrapper[{productname} Configuration Guide]. \ No newline at end of file diff --git a/modules/conc_quay-bridge-operator.adoc b/modules/conc_quay-bridge-operator.adoc index 876b7700b..e3d9a3830 100644 --- a/modules/conc_quay-bridge-operator.adoc +++ b/modules/conc_quay-bridge-operator.adoc @@ -1,16 +1,16 @@ :_content-type: CONCEPT -[[quay-bridge-operator]] +[id="quay-bridge-operator"] = Integrating {productname} into {ocp} with the {qbo} -Using the {qbo}, you can replace the integrated container registry in {ocp} with a {productname} registry. 
By doing this, your integrated {ocp} registry becomes a highly available, enterprise-grade {productname} registry with enhanced role based access control (RBAC) features. +The {qbo} duplicates the features of the integrated {ocp} registry into the new {productname} registry. Using the {qbo}, you can replace the integrated container registry in {ocp} with a {productname} registry. -The primary goal of the {qbo} is to duplicate the features of the integrated {ocp} registry in the new {productname} registry. The features enabled with the {qbo} include: +The features enabled with the {qbo} include: * Synchronizing {ocp} namespaces as {productname} organizations. * Creating robot accounts for each default namespace service account. * Creating secrets for each created robot account, and associating each robot secret to a service account as `Mountable` and `Image Pull Secret`. * Synchronizing {ocp} image streams as {productname} repositories. * Automatically rewriting new builds making use of image streams to output to {productname}. -* Automatically importing an image stream tag once a build completes. +* Automatically importing an image stream tag after a build completes. -By using the following procedures, you will enable bi-directional communication between your {productname} and {ocp} clusters. +By using the following procedures, you can enable bi-directional communication between your {productname} and {ocp} clusters. diff --git a/modules/config-additional-ca-certs-operator.adoc b/modules/config-additional-ca-certs-operator.adoc new file mode 100644 index 000000000..def829b9f --- /dev/null +++ b/modules/config-additional-ca-certs-operator.adoc @@ -0,0 +1,8 @@ +[id="config-additional-cas-ocp"] += Adding additional Certificate Authorities to {productname-ocp} + +On {productname-ocp}, the `extra_ca_certs` configuration field is used to populate additional Certificate Authorities (CAs) into the CA directory, which then adds the CAs into the system trust bundle.
These certificates are used by {productname} to verify SSL/TLS connections with external services like LDAP, OIDC, and storage systems. + +When deploying or redeploying {productname-ocp}, you can add one, or multiple, CAs into the CA directory to ensure that external services are properly secured and validated. On {productname-ocp} deployments, you must manually add the `extra_ca_certs` configuration field to your `config.yaml` file and re-upload the `config.yaml` to {ocp}. + +The following procedures show you how to download your existing configuration file, add additional CAs to your {productname-ocp} deployment, and then re-upload the configuration file. \ No newline at end of file diff --git a/modules/config-custom-ssl-certs-kubernetes.adoc b/modules/config-custom-ssl-certs-kubernetes.adoc index 956c9d12e..ed2fab52d 100644 --- a/modules/config-custom-ssl-certs-kubernetes.adoc +++ b/modules/config-custom-ssl-certs-kubernetes.adoc @@ -1,42 +1,53 @@ -[[config-custom-ssl-cert-kubernetes]] -= Add certs when deployed on Kubernetes +[id="config-custom-ssl-cert-kubernetes"] += Adding custom SSL/TLS certificates when {productname} is deployed on Kubernetes -When deployed on Kubernetes, {productname} mounts in a secret as a volume to store -config assets. Unfortunately, this currently breaks the upload -certificate function of the superuser panel. +When deployed on Kubernetes, {productname} mounts in a secret as a volume to store config assets. Currently, this breaks the upload certificate function of the superuser panel. -To get around this error, a base64 encoded certificate can be added to -the secret _after_ {productname} has been deployed. Here's how: +As a temporary workaround, `base64` encoded certificates can be added to the secret _after_ {productname} has been deployed. -. 
Begin by base64 encoding the contents of the certificate: -+ -``` -$ cat ca.crt ------BEGIN CERTIFICATE----- -MIIDljCCAn6gAwIBAgIBATANBgkqhkiG9w0BAQsFADA5MRcwFQYDVQQKDA5MQUIu -TElCQ09SRS5TTzEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTE2 -MDExMjA2NTkxMFoXDTM2MDExMjA2NTkxMFowOTEXMBUGA1UECgwOTEFCLkxJQkNP -UkUuU08xHjAcBgNVBAMMFUNlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZI -[...] ------END CERTIFICATE----- +Use the following procedure to add custom SSL/TLS certificates when {productname} is deployed on Kubernetes. + +.Prerequisites + +* {productname} has been deployed. +* You have a custom `ca.crt` file. +.Procedure + +. Base64 encode the contents of an SSL/TLS certificate by entering the following command: ++ +[source,terminal] +---- $ cat ca.crt | base64 -w 0 -[...] -c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= -``` -. Use the `kubectl` tool to edit the quay-enterprise-config-secret. +---- + -``` +.Example output ++ +[source,terminal] +---- +...c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= +---- + +. Enter the following `kubectl` command to edit the `quay-enterprise-config-secret` file: ++ +[source,terminal] +---- $ kubectl --namespace quay-enterprise edit secret/quay-enterprise-config-secret -``` -. Add an entry for the cert and paste the full base64 encoded string under -the entry: +---- + +. Add an entry for the certificate and paste the full `base64` encoded string under the entry. For example: + -``` +[source,terminal] +---- custom-cert.crt: c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= -``` +---- -. Finally, recycle all {productname} pods. Use `kubectl delete` to remove all {productname} -pods. The {productname} Deployment will automatically schedule replacement pods -with the new certificate data. +. Use the `kubectl delete` command to remove all {productname} pods.
For example: ++ +[source,terminal] +---- +$ kubectl delete pod quay-operator.v3.7.1-6f9d859bd-p5ftc quayregistry-clair-postgres-7487f5bd86-xnxpr quayregistry-quay-app-upgrade-xq2v6 quayregistry-quay-database-859d5445ff-cqthr quayregistry-quay-redis-84f888776f-hhgms +---- ++ +Afterwards, the {productname} deployment automatically schedules replacement pods with the new certificate data. \ No newline at end of file diff --git a/modules/config-custom-ssl-certs-manual.adoc b/modules/config-custom-ssl-certs-manual.adoc index 2ee0c45c7..464cbaa93 100644 --- a/modules/config-custom-ssl-certs-manual.adoc +++ b/modules/config-custom-ssl-certs-manual.adoc @@ -1,53 +1,95 @@ -[[config-custom-ssl-certs-manual]] -= Adding TLS Certificates to the {productname} Container +[id="config-extra-ca-certs-standalone"] += Adding additional Certificate Authorities to the {productname} container -To add custom TLS certificates to {productname}, create a new -directory named `extra_ca_certs/` beneath the {productname} config -directory. Copy any required site-specific TLS certificates to this new -directory. +The `extra_ca_certs` directory is the directory where additional Certificate Authorities (CAs) can be stored to extend the set of trusted certificates. These certificates are used by {productname} to verify SSL/TLS connections with external services. When deploying {productname}, you can place the necessary CAs in this directory to ensure that connections to services like LDAP, OIDC, and storage systems are properly secured and validated. -[[add-certificates-to-quay-container]] -== Add TLS certificates to {productname} -. View certificate to be added to the container +For standalone {productname} deployments, you must create this directory and copy the additional CA certificates into that directory. + +.Prerequisites + +* You have a CA for the desired service. + +.Procedure + +. 
View the certificate to be added to the container by entering the following command: + -``` +[source,terminal] +---- $ cat storage.crt +---- ++ +.Example output ++ +[source,terminal] +---- -----BEGIN CERTIFICATE----- -MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV -[...] +MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV... -----END CERTIFICATE----- -``` +---- + +. Create the `extra_ca_certs` directory in the `/config` folder of your {productname} directory by entering the following command: ++ +[source,terminal] +---- +$ mkdir -p /path/to/quay_config_folder/extra_ca_certs +---- -. Create certs directory and copy certificate there +. Copy the CA file to the `extra_ca_certs` folder. For example: + -``` -$ mkdir -p quay/config/extra_ca_certs -$ cp storage.crt quay/config/extra_ca_certs/ -$ tree quay/config/ -├── config.yaml -├── extra_ca_certs -│   ├── storage.crt -``` +[source,terminal] +---- +$ cp storage.crt /path/to/quay_config_folder/extra_ca_certs/ +---- -. Obtain the `Quay` container's `CONTAINER ID` with `podman ps`: +. Ensure that the `storage.crt` file exists within the `extra_ca_certs` folder by entering the following command: ++ +[source,terminal] +---- +$ tree /path/to/quay_config_folder/extra_ca_certs +---- + -[subs="verbatim,attributes"] -``` -$ sudo podman ps +.Example output ++ +[source,terminal] +---- +/path/to/quay_config_folder/extra_ca_certs +├── storage.crt +---- + +. Obtain the `CONTAINER ID` of your `Quay` container by entering the following command: ++ +[source,terminal] +---- +$ podman ps +---- ++ +.Example output ++ +[source,terminal] +---- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS 5a3e82c4a75f //quay:{productminv} "/sbin/my_init" 24 hours ago Up 18 hours 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp, 443/tcp grave_keller -``` +---- -. Restart the container with that ID: +. 
Restart the container by entering the following command + -``` -$ sudo podman restart 5a3e82c4a75f -``` +[source,terminal] +---- +$ podman restart 5a3e82c4a75f +---- -. Examine the certificate copied into the container namespace: +. Confirm that the certificate was copied into the container namespace by running the following command: ++ +[source,terminal] +---- +$ podman exec -it 5a3e82c4a75f cat /etc/ssl/certs/storage.pem +---- + -``` -$ sudo podman exec -it 5a3e82c4a75f cat /etc/ssl/certs/storage.pem +.Example output ++ +[source,terminal] +---- -----BEGIN CERTIFICATE----- -MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV -``` \ No newline at end of file +MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV... +-----END CERTIFICATE----- +---- \ No newline at end of file diff --git a/modules/config-debug-variables.adoc b/modules/config-debug-variables.adoc new file mode 100644 index 000000000..d95578730 --- /dev/null +++ b/modules/config-debug-variables.adoc @@ -0,0 +1,17 @@ +:_content-type: REFERENCE +[id="config-debug-variables"] += Debug variables + +The following debug variables are available on {productname}. + +.Debug configuration variables +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description +| **DEBUGLOG** | Boolean | Whether to enable or disable debug logs. +| **USERS_DEBUG** |Integer. Either `0` or `1`. | Used to debug LDAP operations in clear text, including passwords. Must be used with `DEBUGLOG=TRUE`. + +[IMPORTANT] +==== +Setting `USERS_DEBUG=1` exposes credentials in clear text. This variable should be removed from the {productname} deployment after debugging. The log file that is generated with this environment variable should be scrutinized, and passwords should be removed before sending to other users. Use with caution. 
+==== +|=== \ No newline at end of file diff --git a/modules/config-disclaimer.adoc b/modules/config-disclaimer.adoc new file mode 100644 index 000000000..199446f38 --- /dev/null +++ b/modules/config-disclaimer.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="config-disclaimer"] += {productname} configuration disclaimer + +With both standalone and Operator-based deployments of {productname}, certain features and configuration parameters are not actively used or implemented. As a result, feature flags, such as those that enable or disable certain features, and configuration parameters that are not explicitly documented or requested for documentation by Red Hat Support, should only be modified with caution. Unused features or parameters might not be fully tested, supported, or compatible with {productname}. Modifying unused feature parameters might lead to unexpected issues or disruptions with your deployment. + +For information about configuring {productname} in standalone deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#advanced-quay-configuration[Advanced {productname} configuration] + +For information about configuring {productname} Operator deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-config-cli[Configuring {productname} on {ocp}] \ No newline at end of file diff --git a/modules/config-envvar-dbpool.adoc b/modules/config-envvar-dbpool.adoc index dd49b5cd3..85b272a47 100644 --- a/modules/config-envvar-dbpool.adoc +++ b/modules/config-envvar-dbpool.adoc @@ -1,22 +1,47 @@ -[[config-envvar-dbpool]] +:_content-type: REFERENCE +[id="config-envvar-dbpool"] = Database connection pooling {productname} is composed of many different processes which all run within the same container. Many of these processes interact with the database.
-If enabled, each process that interacts with the database will contain a connection pool. These per-process connection pools are configured to maintain a maximum of 20 connections. Under heavy load, it is possible to fill the connection pool for every process within a {productname} container. Under certain deployments and loads, this may require analysis to ensure {productname} does not exceed the database’s configured maximum connection count. +Database connection pooling is enabled by default, and each process that interacts with the database contains a connection pool. These per-process connection pools are configured to maintain a maximum of 20 connections. Under heavy load, it is possible to fill the connection pool for every process within a {productname} container. Under certain deployments and loads, this might require analysis to ensure that {productname} does not exceed the configured database's maximum connection count. -Overtime, the connection pools will release idle connections. To release all connections immediately, {productname} requires a restart. +Over time, the connection pools release idle connections. To release all connections immediately, {productname} requires a restart. -Database connection pooling may be toggled by setting the environment variable DB_CONNECTION_POOLING={true|false} +For standalone {productname} deployments, database connection pooling can be toggled off when starting your deployment. For example: + +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + -e DB_CONNECTION_POOLING=false \ + registry.redhat.io/quay/quay-rhel8:v3.12.1 +---- + +For {productname-ocp}, database connection pooling can be configured by modifying the `QuayRegistry` custom resource definition (CRD).
For example: +[source,yaml] +.Example QuayRegistry CRD +---- +spec: + components: + - kind: quay + managed: true + overrides: + env: + - name: DB_CONNECTION_POOLING + value: "false" +---- .Database connection pooling configuration [cols="3a,1a,2a",options="header"] |=== | Variable | Type | Description -| **DB_CONNECTION_POOLING** | Boolean | Enable or disable database connection pooling +| **DB_CONNECTION_POOLING** | String | Whether to enable or disable database connection pooling. Defaults to true. Accepted values are `"true"` or `"false"` |=== -If database connection pooling is enabled, it is possible to change the maximum size of the connection pool. This can be done through the following config.yaml option: +If database connection pooling is enabled, it is possible to change the maximum size of the connection pool. This can be done through the following `config.yaml` option: .config.yaml [source,yaml] diff --git a/modules/config-envvar-georepl.adoc b/modules/config-envvar-georepl.adoc index 9d23f0280..32faf8382 100644 --- a/modules/config-envvar-georepl.adoc +++ b/modules/config-envvar-georepl.adoc @@ -1,7 +1,8 @@ -[[config-envvar-georepl]] +:_content-type: REFERENCE +[id="config-envvar-georepl"] = Geo-replication -The exact same configuration should be used across all regions, with exception of the storage backend, which can be configured explicitly using the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable. +The same configuration should be used across all regions, with the exception of the storage backend, which can be configured explicitly using the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable.
.Geo-replication configuration [cols="3a,1a,2a",options="header"] diff --git a/modules/config-envvar-intro.adoc b/modules/config-envvar-intro.adoc index 478cbe95b..9ef3a636a 100644 --- a/modules/config-envvar-intro.adoc +++ b/modules/config-envvar-intro.adoc @@ -1,4 +1,5 @@ -[[config-envar-intro]] +:_content-type: REFERENCE +[id="config-envar-intro"] = Environment variables {productname} supports a limited number of environment variables for dynamic configuration. diff --git a/modules/config-envvar-worker-connection.adoc b/modules/config-envvar-worker-connection.adoc index f8714a060..38178f418 100644 --- a/modules/config-envvar-worker-connection.adoc +++ b/modules/config-envvar-worker-connection.adoc @@ -1,8 +1,8 @@ -[[config-envvar-worker-connection]] +:_content-type: REFERENCE +[id="config-envvar-worker-connection"] = HTTP connection counts -It is possible to specify the quantity of simultaneous HTTP connections using environment variables. These can be specified as a whole, or for a specific component. The default for each is 50 parallel connections per process. - +It is possible to specify the quantity of simultaneous HTTP connections using environment variables. These can be specified as a whole, or for a specific component. The default for each is `50` parallel connections per process. 
.HTTP connection counts configuration [cols="3a,1a,2a",options="header"] |=== diff --git a/modules/config-envvar-worker-count.adoc b/modules/config-envvar-worker-count.adoc index 941da7dbd..c79f92fb4 100644 --- a/modules/config-envvar-worker-count.adoc +++ b/modules/config-envvar-worker-count.adoc @@ -1,4 +1,5 @@ -[[config-envvar-worker-count]] +:_content-type: REFERENCE +[id="config-envvar-worker-count"] = Worker count variables .Worker count variables @@ -9,11 +10,11 @@ | **WORKER_COUNT** | Number | Generic override for number of processes | **WORKER_COUNT_REGISTRY** | Number | Specifies the number of processes to handle Registry requests within the `Quay` container + + -**Values:** Integer between 8 and 64 +**Values:** Integer between `8` and `64` | **WORKER_COUNT_WEB** | Number | Specifies the number of processes to handle UI/Web requests within the container + + - **Values:** Integer between 2 and 32 + **Values:** Integer between `2` and `32` | **WORKER_COUNT_SECSCAN** | Number | Specifies the number of processes to handle Security Scanning (e.g. Clair) integration within the container + + -**Values:** Integer between 2 and 4 +**Values:** Integer. Because the Operator specifies 2 vCPUs for resource requests and limits, setting this value between `2` and `4` is safe. However, users can run more, for example, `16`, if warranted. |=== \ No newline at end of file diff --git a/modules/config-extra-ca-certs-quay.adoc b/modules/config-extra-ca-certs-quay.adoc new file mode 100644 index 000000000..872ef8d92 --- /dev/null +++ b/modules/config-extra-ca-certs-quay.adoc @@ -0,0 +1,6 @@ +[id="config-extra-ca-certs-quay"] += Adding additional Certificate Authorities for {productname} + +Certificate Authorities (CAs) are used by {productname} to verify SSL/TLS connections with external services, like OIDC providers, LDAP providers, storage providers, and so on.
+ +The following sections provide information about uploading additional CAs to {productname} depending on your deployment type. \ No newline at end of file diff --git a/modules/config-fields-actionlog.adoc b/modules/config-fields-actionlog.adoc index 80e934a80..61aa12e95 100644 --- a/modules/config-fields-actionlog.adoc +++ b/modules/config-fields-actionlog.adoc @@ -1,4 +1,4 @@ -[[config-fields-actionlog]] +[id="config-fields-actionlog"] = Action log configuration fields == Action log storage configuration @@ -7,64 +7,135 @@ [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_LOG_EXPORT** | Boolean | Whether to allow exporting of action logs + - + +| **FEATURE_LOG_EXPORT** | Boolean | Whether to allow exporting of action logs. + + + **Default:** `True` -| **LOGS_MODEL** | String | Enable or disable the security scanner + - + -**Values:** One of `database`, `transition_reads_both_writes_es`, `elasticsearch` + -**Default:** `database` -| **LOGS_MODEL_CONFIG** | Object | Logs model config for action logs +| **LOGS_MODEL** | String | Specifies the preferred method for handling log data. + + + +**Values:** One of `database`, `transition_reads_both_writes_es`, `elasticsearch`, `splunk` + +**Default:** `database` +| **LOGS_MODEL_CONFIG** | Object | Logs model config for action logs. + +| **ALLOW_WITHOUT_STRICT_LOGGING** | Boolean | When set to `True`, if the external log system like Splunk or ElasticSearch is intermittently unavailable, allows users to push images normally. Events are logged to the stdout instead. Overrides `ALLOW_PULLS_WITHOUT_STRICT_LOGGING` if set. + + + + **Default:** False |=== +[id="elasticsearch-log-configuration-fields"] +=== Elasticsearch configuration fields + +The following fields are available when configuring Elasticsearch for {productname}. 
-* **LOGS_MODEL_CONFIG** [object]: Logs model config for action logs -** **elasticsearch_config** [object]: Elasticsearch cluster configuration -*** **access_key** [string]: Elasticsearch user (or IAM key for AWS ES) +* **LOGS_MODEL_CONFIG** [object]: Logs model config for action logs. +** **elasticsearch_config** [object]: Elasticsearch cluster configuration. +*** **access_key** [string]: Elasticsearch user (or IAM key for AWS ES). **** **Example**: `some_string` -*** **host** [string]: Elasticsearch cluster endpoint +*** **host** [string]: Elasticsearch cluster endpoint. **** **Example**: `host.elasticsearch.example` -*** **index_prefix** [string]: Elasticsearch's index prefix +*** **index_prefix** [string]: Elasticsearch's index prefix. **** **Example**: `logentry_` *** **index_settings** [object]: Elasticsearch's index settings -*** **use_ssl** [boolean]: Use ssl for Elasticsearch. Defaults to True +*** **use_ssl** [boolean]: Use ssl for Elasticsearch. Defaults to `True`. **** **Example**: `True` -*** **secret_key** [string]: Elasticsearch password (or IAM secret for AWS ES) +*** **secret_key** [string]: Elasticsearch password (or IAM secret for AWS ES). **** **Example**: `some_secret_string` -*** **aws_region** [string]: Amazon web service region +*** **aws_region** [string]: Amazon web service region. **** **Example**: `us-east-1` -*** **port** [number]: Elasticsearch cluster endpoint port +*** **port** [number]: Elasticsearch cluster endpoint port. **** **Example**: `1234` -** **kinesis_stream_config** [object]: AWS Kinesis Stream configuration -*** **aws_secret_key** [string]: AWS secret key +** **kinesis_stream_config** [object]: AWS Kinesis Stream configuration. +*** **aws_secret_key** [string]: AWS secret key. **** **Example**: `some_secret_key` -*** **stream_name** [string]: Kinesis stream to send action logs to +*** **stream_name** [string]: Kinesis stream to send action logs to. 
**** **Example**: `logentry-kinesis-stream` -*** **aws_access_key** [string]: AWS access key +*** **aws_access_key** [string]: AWS access key. **** **Example**: `some_access_key` -*** **retries** [number]: Max number of attempts made on a single request +*** **retries** [number]: Max number of attempts made on a single request. **** **Example**: `5` -*** **read_timeout** [number]: Number of seconds before timeout when reading from a connection +*** **read_timeout** [number]: Number of seconds before timeout when reading from a connection. **** **Example**: `5` -*** **max_pool_connections** [number]: The maximum number of connections to keep in a connection pool +*** **max_pool_connections** [number]: The maximum number of connections to keep in a connection pool. **** **Example**: `10` -*** **aws_region** [string]: AWS region +*** **aws_region** [string]: AWS region. **** **Example**: `us-east-1` -*** **connect_timeout** [number]: Number of seconds before timeout when attempting to make a connection +*** **connect_timeout** [number]: Number of seconds before timeout when attempting to make a connection. **** **Example**: `5` -** **producer** [string]: Logs producer if logging to Elasticsearch +** **producer** [string]: Logs producer if logging to Elasticsearch. *** **enum**: kafka, elasticsearch, kinesis_stream *** **Example**: `kafka` -** **kafka_config** [object]: Kafka cluster configuration -*** **topic** [string]: Kafka topic to publish log entries to +** **kafka_config** [object]: Kafka cluster configuration. +*** **topic** [string]: Kafka topic to publish log entries to. **** **Example**: `logentry` -*** **bootstrap_servers** [array]: List of Kafka brokers to bootstrap the client from -*** **max_block_seconds** [number]: Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable +*** **bootstrap_servers** [array]: List of Kafka brokers to bootstrap the client from. 
+*** **max_block_seconds** [number]: Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable. **** **Example**: `10` +[id="splunk-configuration-fields"] +=== Splunk configuration fields + +The following fields are available when configuring Splunk for {productname}. + +//splunk +** **producer** [string]: `splunk`. Use when configuring Splunk. +** **splunk_config** [object]: Logs model configuration for Splunk action logs or the Splunk cluster configuration. +*** **host** [string]: Splunk cluster endpoint. +*** **port** [integer]: Splunk management cluster endpoint port. +*** **bearer_token** [string]: The bearer token for Splunk. +*** **verify_ssl** [boolean]: Enable (`True`) or disable (`False`) TLS/SSL verification for HTTPS connections. +*** **index_prefix** [string]: Splunk's index prefix. +*** **ssl_ca_path** [string]: The relative container path to a single `.pem` file containing a certificate authority (CA) for SSL validation. + +.Example Splunk configuration +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: http://.remote.csb + port: 8089 + bearer_token: + url_scheme: + verify_ssl: False + index_prefix: + ssl_ca_path: +# ... +---- + +[id="splunk-hec-configuration-fields"] +=== Splunk HEC configuration fields +The following fields are available when configuring Splunk HTTP Event Collector (HEC) for {productname}. +** **producer** [string]: `splunk_hec`. Use when configuring Splunk HEC. +** **splunk_hec_config** [object]: Logs model configuration for Splunk HTTP event collector action logs configuration. +*** **host** [string]: Splunk cluster endpoint. +*** **port** [integer]: Splunk management cluster endpoint port. +*** **hec_token** [string]: HEC token for Splunk. +*** **url_scheme** [string]: The URL scheme for accessing the Splunk service. If Splunk is behind SSL/TLS, must be `https`. 
+*** **verify_ssl** [boolean]: Enable (`true`) or disable (`false`) SSL/TLS verification for HTTPS connections. +*** **index** [string]: The Splunk index to use. +*** **splunk_host** [string]: The host name to log this event. +*** **splunk_sourcetype** [string]: The name of the Splunk `sourcetype` to use. +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk_hec + splunk_hec_config: <1> + host: prd-p-aaaaaq.splunkcloud.com <2> + port: 8088 <3> + hec_token: 12345678-1234-1234-1234-1234567890ab <4> + url_scheme: https <5> + verify_ssl: False <6> + index: quay <7> + splunk_host: quay-dev <8> + splunk_sourcetype: quay_logs <9> +# ... +---- == Action log rotation and archiving configuration @@ -72,21 +143,28 @@ [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_ACTION_LOG_ROTATION** | Boolean | Enabling log rotation and archival will move all logs older than 30 days to storage + - + -**Default:** `false` +| **FEATURE_ACTION_LOG_ROTATION** | Boolean | Enabling log rotation and archival will move all logs older than 30 days to storage. + + + +**Default:** `false` -| **ACTION_LOG_ARCHIVE_LOCATION** | String | If action log archiving is enabled, the storage engine in which to place the archived data + - + +| **ACTION_LOG_ARCHIVE_LOCATION** | String | If action log archiving is enabled, the storage engine in which to place the archived data. + + + **Example:**: `s3_us_east` -| **ACTION_LOG_ARCHIVE_PATH** | String | If action log archiving is enabled, the path in storage in which to place the archived data + - + +| **ACTION_LOG_ARCHIVE_PATH** | String | If action log archiving is enabled, the path in storage in which to place the archived data. + + + **Example:** `archives/actionlogs` -| **ACTION_LOG_ROTATION_THRESHOLD** | String | The time interval after which to rotate logs + - + +| **ACTION_LOG_ROTATION_THRESHOLD** | String | The time interval after which to rotate logs. 
+ + + **Example:** `30d` |=== +== Action log audit configuration - - +.Audit logs configuration field +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **ACTION_LOG_AUDIT_LOGINS** | Boolean | When set to `True`, tracks advanced events such as logging into, and out of, the UI, and logging in using Docker for regular users, robot accounts, and for application-specific token accounts. + + + +**Default:** `True` +|=== diff --git a/modules/config-fields-basic.adoc b/modules/config-fields-basic.adoc index e5f156306..f5266a08d 100644 --- a/modules/config-fields-basic.adoc +++ b/modules/config-fields-basic.adoc @@ -5,56 +5,40 @@ [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **REGISTRY_TITLE** | String | If specified, the long-form title for the registry. It should not exceed 35 characters. It will be displayed in the frontend of your {productname} deployment, for example, in your browser tab + - + -**Default:** + +| **REGISTRY_TITLE** | String | If specified, the long-form title for the registry. Displayed in frontend of your {productname} deployment, for example, at the sign in page of your organization. Should not exceed 35 characters. + + +**Default:** + `Red Hat Quay` -| **REGISTRY_TITLE_SHORT** | String | If specified, the short-form title for the registry. It will be displayed in the frontend of your {productname} deployment, for example, in your browser tab + - + -**Default:** + +| **REGISTRY_TITLE_SHORT** | String | If specified, the short-form title for the registry. Title is displayed on various pages of your organization, for example, as the title of the tutorial on your organization's *Tutorial* page. + + +**Default:** + `Red Hat Quay` -| {nbsp} | {nbsp} |{nbsp} -| **BRANDING** | Object | Custom branding for logos and URLs in the {productname} UI. - -| **.logo** + -(Required) | String | Main logo image URL. 
+ - + -**Example:** + -`/static/img/quay-horizontal-color.svg` -| **.footer_img** | String | Logo for UI footer. + - + -**Example:** + -`/static/img/RedHat.svg` -| **.footer_url** | String | Link for footer image. + - + -**Example:** + -`https://redhat.com` | **CONTACT_INFO** | Array of String | If specified, contact information to display on the contact page. If only a single piece of contact information is specified, the contact footer will link directly. -|**[0]** | String | Adds a link to send an e-mail. + - + -**Pattern:** + -`^mailto:(.)+$` + -**Example:** + +|**[0]** | String | Adds a link to send an e-mail. + + + +**Pattern:** + +`^mailto:(.)+$` + +**Example:** + `mailto:support@quay.io` -|**[1]** | String | Adds a link to visit an IRC chat room. + - + -**Pattern:** + -`^irc://(.)+$` + -**Example:** + +|**[1]** | String | Adds a link to visit an IRC chat room. + + + +**Pattern:** + +`^irc://(.)+$` + +**Example:** + `irc://chat.freenode.net:6665/quay` -|**[2]** | String | Adds a link to call a phone number.+ - + -**Pattern:** + -`^tel:(.)+$` + -**Example:** + +|**[2]** | String | Adds a link to call a phone number. + + + +**Pattern:** + +`^tel:(.)+$` + +**Example:** + `tel:+1-888-930-3475` -|**[3]** | String |Adds a link to a defined URL. + - + -**Pattern:** + -`^http(s)?://(.)+$` + -**Example:** + +|**[3]** | String |Adds a link to a defined URL. + + + +**Pattern:** + +`^http(s)?://(.)+$` + +**Example:** + `https://twitter.com/quayio` |=== \ No newline at end of file diff --git a/modules/config-fields-branding.adoc b/modules/config-fields-branding.adoc new file mode 100644 index 000000000..9ddd89623 --- /dev/null +++ b/modules/config-fields-branding.adoc @@ -0,0 +1,38 @@ +:_content-type: CONCEPT +[id="config-fields-branding"] += Branding configuration fields + +.Branding configuration fields +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **BRANDING** | Object | Custom branding for logos and URLs in the {productname} UI. 
+ +| **.logo** + +(Required) | String | Main logo image URL. + + +The header logo defaults to 205x30 PX. The form logo on the {productname} sign in screen of the web UI defaults to 356.5x39.7 PX. + + +**Example:** + +`/static/img/quay-horizontal-color.svg` +| **.footer_img** | String | Logo for UI footer. Defaults to 144x34 PX. + + + +**Example:** + +`/static/img/RedHat.svg` +| **.footer_url** | String | Link for footer image. + + + +**Example:** + +`https://redhat.com` +|=== + +[id="example-config-fields-branding"] +== Example configuration for {productname} branding + +.Branding config.yaml example +[source,yaml] +---- +BRANDING: + logo: https://www.mend.io/wp-content/media/2020/03/5-tips_small.jpg + footer_img: https://www.mend.io/wp-content/media/2020/03/5-tips_small.jpg + footer_url: https://opensourceworld.org/ +---- \ No newline at end of file diff --git a/modules/config-fields-build-logs.adoc b/modules/config-fields-build-logs.adoc index fc7ea522a..69a950ee0 100644 --- a/modules/config-fields-build-logs.adoc +++ b/modules/config-fields-build-logs.adoc @@ -1,17 +1,17 @@ -[[config-fields-build-logs]] +[id="config-fields-build-logs"] = Build logs configuration fields .Build logs configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_READER_BUILD_LOGS** | Boolean | If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. + +| **FEATURE_READER_BUILD_LOGS** | Boolean | If set to true, build logs can be read by those with `read` access to the repository, rather than only `write` access or `admin` access. + + **Default:** `False` -| **LOG_ARCHIVE_LOCATION** | String | The storage location, defined in DISTRIBUTED_STORAGE_CONFIG, in which to place the archived build logs + +| **LOG_ARCHIVE_LOCATION** | String | The storage location, defined in `DISTRIBUTED_STORAGE_CONFIG`, in which to place the archived build logs. 
+ + **Example:** `s3_us_east` -| **LOG_ARCHIVE_PATH** | String | The path under the configured storage engine in which to place the archived build logs in JSON form + +| **LOG_ARCHIVE_PATH** | String | The path under the configured storage engine in which to place the archived build logs in `.JSON` format. + + **Example:** `archives/buildlogs` |=== \ No newline at end of file diff --git a/modules/config-fields-build-manager.adoc b/modules/config-fields-build-manager.adoc new file mode 100644 index 000000000..cc824bed8 --- /dev/null +++ b/modules/config-fields-build-manager.adoc @@ -0,0 +1,50 @@ +[id="config-fields-build-manager"] += Build manager configuration fields + +.Build manager configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +|*ALLOWED_WORKER_COUNT* |String | Defines how many Build Workers are instantiated per {productname} pod. Typically set to `1`. +|*ORCHESTRATOR_PREFIX* |String | Defines a unique prefix to be added to all Redis keys. This is useful to isolate Orchestrator values from other Redis keys. +|*REDIS_HOST* |Object | The hostname for your Redis service. +|*REDIS_PASSWORD* |String | The password to authenticate into your Redis service. +|*REDIS_SSL* |Boolean |Defines whether or not your Redis connection uses SSL/TLS. +|*REDIS_SKIP_KEYSPACE_EVENT_SETUP* |Boolean | By default, {productname} does not set up the keyspace events required for key events at runtime. To do so, set `REDIS_SKIP_KEYSPACE_EVENT_SETUP` to `false`. +|*EXECUTOR* |String | Starts a definition of an Executor of this type. Valid values are `kubernetes` and `ec2`. +|*BUILDER_NAMESPACE* |String | Kubernetes namespace where {productname} Builds will take place. +|*K8S_API_SERVER* |Object | Hostname for API Server of the {ocp} cluster where Builds will take place. +|*K8S_API_TLS_CA* |Object | The filepath in the `Quay` container of the Build cluster's CA certificate for the `Quay` application to trust when making API calls. 
+|*KUBERNETES_DISTRIBUTION* |String | Indicates which type of Kubernetes is being used. Valid values are `openshift` and `k8s`. +|*CONTAINER_** |Object | Define the resource requests and limits for each `build` pod. +|*NODE_SELECTOR_** |Object | Defines the node selector label name-value pair where `build` Pods should be scheduled. +|*CONTAINER_RUNTIME* |Object | Specifies whether the Builder should run `docker` or `podman`. Customers using Red Hat's `quay-builder` image should set this to `podman`. +|*SERVICE_ACCOUNT_NAME/SERVICE_ACCOUNT_TOKEN* |Object | Defines the Service Account name or token that will be used by `build` pods. +|*QUAY_USERNAME/QUAY_PASSWORD* |Object | Defines the registry credentials needed to pull the {productname} build worker image that is specified in the `WORKER_IMAGE` field. +ifdef::upstream[] +This is useful if pulling a non-public quay-builder image from quay.io. +endif::upstream[] +ifdef::downstream[] +Customers should provide a Red Hat Service Account credential as defined in the section "Creating Registry Service Accounts" against registry.redhat.io in the article at https://access.redhat.com/RegistryAuthentication. +endif::downstream[] +|*WORKER_IMAGE* |Object |Image reference for the {productname} Builder image. +ifdef::upstream[] +quay.io/quay/quay-builder +endif::upstream[] +ifdef::downstream[] +registry.redhat.io/quay/quay-builder +endif::downstream[] +|*WORKER_TAG* |Object |Tag for the Builder image desired. The latest version is {producty}. +|*BUILDER_VM_CONTAINER_IMAGE* |Object | The full reference to the container image holding the internal VM needed to run each {productname} Build. +ifdef::upstream[] +(`quay.io/quay/quay-builder-qemu-fedoracoreos:latest`). +endif::upstream[] +ifdef::downstream[] +(`registry.redhat.io/quay/quay-builder-qemu-rhcos:{producty}`). +endif::downstream[] +|*SETUP_TIME* |String | Specifies the number of seconds at which a Build times out if it has not yet registered itself with the Build Manager. 
Defaults to `500` seconds. Builds that time out are attempted to be restarted three times. If the Build does not register itself after three attempts it is considered failed. + +|*MINIMUM_RETRY_THRESHOLD* |String | This setting is used with multiple Executors. It indicates how many retries are attempted to start a Build before a different Executor is chosen. Setting to `0` means there are no restrictions on how many tries the build job needs to have. This value should be kept intentionally small (three or less) to ensure failovers happen quickly during infrastructure failures. You must specify a value for this setting. For example, `Kubernetes` is set as the first executor and `EC2` as the second executor. If you want the last attempt to run a job to always be executed on EC2 and not Kubernetes, you can set the Kubernetes executor's `MINIMUM_RETRY_THRESHOLD` to `1` and EC2's `MINIMUM_RETRY_THRESHOLD` to `0` (defaults to `0` if not set). In this case, the Kubernetes' `MINIMUM_RETRY_THRESHOLD` *retries_remaining(1)* would evaluate to `False`, therefore falling back to the second executor configured. +|*SSH_AUTHORIZED_KEYS* |Object | List of SSH keys to bootstrap in the `ignition` config. This allows other keys to be used to SSH into the EC2 instance or QEMU virtual machine (VM). +|=== + diff --git a/modules/config-fields-clair-auth.adoc b/modules/config-fields-clair-auth.adoc index 131092576..c68b309b7 100644 --- a/modules/config-fields-clair-auth.adoc +++ b/modules/config-fields-clair-auth.adoc @@ -5,7 +5,6 @@ The following authorization configuration fields are available for Clair. [cols="3a,1a,2a",options="header"] - |=== | Field | Type | Description | **auth** | Object | Defines Clair's external and intra-service JWT based authentication. If multiple `auth` mechanisms are defined, Clair picks one. Currently, multiple mechanisms are unsupported. @@ -16,3 +15,19 @@ The following authorization configuration fields are available for Clair. 
| **.psk.iss** | String | A list of JWT issuers to verify. An empty list accepts any issuer in a JWT claim. |=== + +[discrete] +== Example authorization configuration + +The following `authorization` snippet is for a minimal configuration. + +.Example authorization configuration +[source,yaml] +---- +# ... +auth: + psk: + key: MTU5YzA4Y2ZkNzJoMQ== <1> + iss: ["quay"] +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-indexer.adoc b/modules/config-fields-clair-indexer.adoc index 08f2f05b1..81f4467ea 100644 --- a/modules/config-fields-clair-indexer.adoc +++ b/modules/config-fields-clair-indexer.adoc @@ -2,7 +2,7 @@ [id="config-fields-clair-indexer"] = Clair indexer configuration fields -The following indexer configuration fields are available for Clair. +The following table describes the configuration fields for Clair's `indexer` component. [cols="3a,1a,2a",options="header"] |=== @@ -32,4 +32,21 @@ Scanner allows for passing configuration options to layer scanners. The scanner | **.scanner.package** | String | A map with the name of a particular scanner and arbitrary YAML as a value. | **.scanner.repo** | String | A map with the name of a particular scanner and arbitrary YAML as a value. -|=== \ No newline at end of file +|=== + +[discrete] +== Example indexer configuration + +The following example shows a hypothetical indexer configuration for Clair. + +.Example indexer configuration +[source,yaml] +---- +# ... +indexer: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + scanlock_retry: 10 + layer_scan_concurrency: 5 + migrations: true +# ... 
+---- diff --git a/modules/config-fields-clair-matcher.adoc b/modules/config-fields-clair-matcher.adoc index fd8668d27..bcb797b6a 100644 --- a/modules/config-fields-clair-matcher.adoc +++ b/modules/config-fields-clair-matcher.adoc @@ -2,7 +2,7 @@ [id="config-fields-clair-matcher"] = Clair matcher configuration fields -The following matcher configuration fields are available for Clair. +The following table describes the configuration fields for Clair's `matcher` component. [NOTE] ==== @@ -24,7 +24,7 @@ Clair allows for a custom connection pool size. This number directly sets how ma This parameter will be ignored in a future version. Users should configure this through the connection string. -| **.indexer_addr** | String | A matcher contacts an indexer to create a `VulnerabilityReport`. The location of this indexer is required. +| **.indexer_addr** | String | A matcher contacts an indexer to create a vulnerability report. The location of this indexer is required. Defaults to `30m`. @@ -32,13 +32,35 @@ Defaults to `30m`. | **.period** | String | Determines how often updates for new security advisories take place. -Defaults to `30m`. +Defaults to `6h`. | **.disable_updaters** | Boolean | Whether to run background updates or not. +Default: `False` + | **.update_retention** | Integer | Sets the number of update operations to retain between garbage collection cycles. This should be set to a safe MAX value based on database size constraints. Defaults to `10m`. If a value of less than `0` is provided, garbage collection is disabled. `2` is the minimum value to ensure updates can be compared to notifications. -|=== \ No newline at end of file +|=== + +[discrete] +== Example matcher configuration + +.Example matcher configuration +[source,yaml] +---- +# ... 
+matcher: + connstring: >- + host= port=5432 dbname= user= password=D + sslmode=verify-ca sslcert=/etc/clair/ssl/cert.pem sslkey=/etc/clair/ssl/key.pem + sslrootcert=/etc/clair/ssl/ca.pem + indexer_addr: http://clair-v4/ + disable_updaters: false + migrations: true + period: 6h + update_retention: 2 +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-matchers.adoc b/modules/config-fields-clair-matchers.adoc index ab614504b..4c3e95396 100644 --- a/modules/config-fields-clair-matchers.adoc +++ b/modules/config-fields-clair-matchers.adoc @@ -2,31 +2,42 @@ [id="config-fields-clair-matchers"] = Clair matchers configuration fields -The following matchers configuration fields are available for Clair. +The following table describes the configuration fields for Clair's `matchers` component. [NOTE] ==== Differs from `matcher` configuration fields. ==== +.Matchers configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **matchers** | Array of strings | Provides configuration for the in-tree `matchers` and `remotematchers`. +| **matchers** | Array of strings | Provides configuration for the in-tree `matchers`. -| **.names** | String | A list of string values informing the matcher factory about enabled matchers. If value is set to `null`, the default list of matchers run: -*alpine*, *aws*, *debian*, *oracle*, *photon*, *python*, *python*, *rhel*, *suse*, *ubuntu*, *crda* +| **.names** | String | A list of string values informing the matcher factory about enabled matchers. If value is set to `null`, the default list of matchers run. The following strings are accepted: +*alpine-matcher*, *aws-matcher*, *debian-matcher*, *gobin*, *java-maven*, *oracle*, *photon*, *python*, *rhel*, *rhel-container-matcher*, *ruby*, *suse*, *ubuntu-matcher* | **.config** | String | Provides configuration to a specific matcher. 
A map keyed by the name of the matcher containing a sub-object which will be provided to the matchers factory constructor. For example: +|=== + +[discrete] +== Example matchers configuration + +The following example shows a hypothetical Clair deployment that requires only the `alpine`, `aws`, `debian`, `oracle` matchers. + +.Example matchers configuration [source,yaml] ---- -config: - python: - ignore_vulns: - - CVE-XYZ - - CVE-ABC ----- -|=== \ No newline at end of file +# ... +matchers: + names: + - "alpine-matcher" + - "aws" + - "debian" + - "oracle" +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-metrics.adoc b/modules/config-fields-clair-metrics.adoc index 983fd4cef..0acb7c719 100644 --- a/modules/config-fields-clair-metrics.adoc +++ b/modules/config-fields-clair-metrics.adoc @@ -15,4 +15,20 @@ The following metrics configuration fields are available for Clair. | **.prometheus** | String | Configuration for a Prometheus metrics exporter. | **.prometheus.endpoint** | String | Defines the path where metrics are served. -|=== \ No newline at end of file +|=== + +[discrete] +== Example metrics configuration + +The following example shows a hypothetical metrics configuration for Clair. + +.Example metrics configuration +[source,yaml] +---- +# ... +metrics: + name: "prometheus" + prometheus: + endpoint: "/metricsz" +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-notifiers.adoc b/modules/config-fields-clair-notifiers.adoc index eccfd2e6c..87afce75e 100644 --- a/modules/config-fields-clair-notifiers.adoc +++ b/modules/config-fields-clair-notifiers.adoc @@ -2,10 +2,9 @@ [id="config-fields-clair-notifiers"] = Clair notifier configuration fields -The following notifier configuration fields are available for Clair. +The general notifier configuration fields for Clair are listed below. 
[cols="3a,1a,2a",options="header"] - |=== | Field | Type | Description | **notifier** | Object | Provides Clair notifier node configuration. @@ -23,6 +22,43 @@ The following notifier configuration fields are available for Clair. | **.delivery_interval** | String | The frequency at which the notifier attempts delivery of created, or previously failed, notifications. | **.disable_summary** | Boolean | Controls whether notifications should be summarized to one per manifest. +|=== + +[discrete] +== Example notifier configuration + +The following `notifier` snippet is for a minimal configuration. + +.Example notifier configuration +[source,yaml] +---- +# ... +notifier: + connstring: >- + host=DB_HOST port=5432 dbname=notifier user=DB_USER password=DB_PASS + sslmode=verify-ca sslcert=/etc/clair/ssl/cert.pem sslkey=/etc/clair/ssl/key.pem + sslrootcert=/etc/clair/ssl/ca.pem + indexer_addr: http://clair-v4/ + matcher_addr: http://clair-v4/ + delivery_interval: 5s + migrations: true + poll_interval: 15s + webhook: + target: "http://webhook/" + callback: "http://clair-notifier/notifier/api/v1/notifications" + headers: "" + amqp: null + stomp: null +# ... +---- + +[id="clair-webhook-config-fields"] +== Clair webhook configuration fields + +The following webhook fields are available for the Clair notifier environment. + +.Clair webhook fields +|=== | **.webhook** | Object | Configures the notifier for webhook delivery. @@ -33,7 +69,29 @@ The following notifier configuration fields are available for Clair. This will typically be where the Clair notifier is hosted. | **.webhook.headers** | String | A map associating a header name to a list of values. +|=== + +[discrete] +== Example webhook configuration + +.Example webhook configuration +[source,yaml] +---- +# ... +notifier: +# ... + webhook: + target: "http://webhook/" + callback: "http://clair-notifier/notifier/api/v1/notifications" +# ... 
+---- + +[id="clair-amqp-config-fields"] +== Clair AMQP configuration fields +The following Advanced Message Queuing Protocol (AMQP) fields are available for the Clair notifier environment. + +|=== | **.amqp** | Object | Configures the notifier for AMQP delivery. [NOTE] ==== @@ -73,7 +131,42 @@ Clair also allows `SSL_CERT_DIR`, as documented for the Go `crypto/x509` package ==== | **.amqp.tls.key** | String | The filesystem path where a TLS/SSL private key can be read. +|=== + +[discrete] +== Example AMQP configuration + +The following example shows a hypothetical AMQP configuration for Clair. + +.Example AMQP configuration +[source,yaml] +---- +# ... +notifier: +# ... + amqp: + exchange: + name: "" + type: "direct" + durable: true + auto_delete: false + uris: ["amqp://user:pass@host:10000/vhost"] + direct: false + routing_key: "notifications" + callback: "http://clair-notifier/notifier/api/v1/notifications" + tls: + root_ca: "optional/path/to/rootca" + cert: "mandatory/path/to/cert" + key: "mandatory/path/to/key" +# ... +---- + +[id="clair-stomp-config-fields"] +== Clair STOMP configuration fields + +The following Simple Text Oriented Message Protocol (STOMP) fields are available for the Clair notifier environment. +|=== | **.stomp** | Object | Configures the notifier for STOMP delivery. | **.stomp.direct** | Boolean | If `true`, the notifier delivers individual notifications (not a callback) to the configured STOMP broker. @@ -104,4 +197,29 @@ Clair also respects `SSL_CERT_DIR`, as documented for the Go `crypto/x509` packa | **.stomp.user.login** | String | The STOMP login to connect with. | **.stomp.user.passcode** | String | The STOMP passcode to connect with. -|=== \ No newline at end of file +|=== + +[discrete] +== Example STOMP configuration + +The following example shows a hypothetical STOMP configuration for Clair. + +.Example STOMP configuration +[source,yaml] +---- +# ... +notifier: +# ... 
+ stomp: + destination: "notifications" + direct: false + callback: "http://clair-notifier/notifier/api/v1/notifications" + login: + login: "username" + passcode: "passcode" + tls: + root_ca: "optional/path/to/rootca" + cert: "mandatory/path/to/cert" + key: "mandatory/path/to/key" +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-trace.adoc b/modules/config-fields-clair-trace.adoc index 499eb7e74..9f4f17d17 100644 --- a/modules/config-fields-clair-trace.adoc +++ b/modules/config-fields-clair-trace.adoc @@ -5,7 +5,6 @@ The following trace configuration fields are available for Clair. [cols="3a,1a,2a",options="header"] - |=== | Field | Type | Description | **trace** | Object | Defines distributed tracing configuration based on OpenTelemetry. @@ -33,4 +32,23 @@ The following trace configuration fields are available for Clair. | **.jaeger.tags** | String | Key-value pairs to provide additional metadata. | **.jaeger.buffer_max** | Integer | The maximum number of spans that can be buffered in memory before they are sent to the Jaeger backend for storage and analysis. -|=== \ No newline at end of file +|=== + +[discrete] +== Example trace configuration + +The following example shows a hypothetical trace configuration for Clair. + +.Example trace configuration +[source,yaml] +---- +# ... +trace: + name: "jaeger" + probability: 1 + jaeger: + agent: + endpoint: "localhost:6831" + service_name: "clair" +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-updaters.adoc b/modules/config-fields-clair-updaters.adoc index fcd0e6d8c..82d40099e 100644 --- a/modules/config-fields-clair-updaters.adoc +++ b/modules/config-fields-clair-updaters.adoc @@ -2,30 +2,39 @@ [id="config-fields-clair-updaters"] = Clair updaters configuration fields -The following updaters configuration fields are available for Clair. +The following table describes the configuration fields for Clair's `updaters` component. 
+.Updaters configuration fields [cols="3a,1a,2a",options="header"] - |=== | Field | Type | Description | **updaters** | Object | Provides configuration for the matcher's update manager. | **.sets** | String | A list of values informing the update manager which updaters to run. -If value is set to `null`, the default set of updaters runs the following: *alpine*, *aws*, *debian*, *oracle*, *photon*, *pyupio*, *rhel*, *suse*, *ubuntu* +If value is set to `null`, the default set of updaters runs the following: *alpine*, *aws*, *clair.cvss*, *debian*, *oracle*, *photon*, *osv*, *rhel*, *rhcc*, *suse*, *ubuntu* If left blank, zero updaters run. | **.config** | String | Provides configuration to specific updater sets. -A map keyed by the name of the updater set containing a sub-object which will be provided to the updater set's constructor. For example: +A map keyed by the name of the updater set containing a sub-object which will be provided to the updater set's constructor. For a list of the sub-objects for each updater, see "Advanced updater configuration". +|=== + +[discrete] +== Example updaters configuration +In the following configuration, only the `rhel` set is configured. The `ignore_unpatched` variable, which is specific to the `rhel` updater, is also defined. + +.Example updaters configuration [source,yaml] ---- -config: - ubuntu: - security_tracker_url: http://security.url - ignore_distributions: - - cosmic ----- -|=== \ No newline at end of file +# ... +updaters: + sets: + - rhel + config: + rhel: + ignore_unpatched: false +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-db.adoc b/modules/config-fields-db.adoc index b07932a20..78d774a40 100644 --- a/modules/config-fields-db.adoc +++ b/modules/config-fields-db.adoc @@ -2,19 +2,19 @@ [id="config-fields-db"] = Database configuration -This section describes the database configuration fields available for {productname} deployments. 
+This section describes the database configuration fields available for {productname} deployments. [id="database-uri"] == Database URI -With {productname}, connection to the database is configured by using the required `DB_URI` field. +With {productname}, connection to the database is configured by using the required `DB_URI` field. -The following table describes the `DB_URI` configuration field: +The following table describes the `DB_URI` configuration field: .Database URI [cols="3a,1a,2a",options="header"] |=== -| Field | Type | Description +| Field | Type | Description | **DB_URI** + (Required) | String | The URI for accessing the database, including any credentials. @@ -26,29 +26,29 @@ Example `DB_URI` field: [id="database-connection-arguments"] == Database connection arguments -Optional connection arguments are configured by the `DB_CONNECTION_ARGS` parameter. Some of the key-value pairs defined under `DB_CONNECTION_ARGS` are generic, while others are database specific. +Optional connection arguments are configured by the `DB_CONNECTION_ARGS` parameter. Some of the key-value pairs defined under `DB_CONNECTION_ARGS` are generic, while others are database specific. -The following table describes database connection arguments: +The following table describes database connection arguments: .Database connection arguments [cols="3a,1a,2a",options="header"] |=== -| Field | Type | Description -| **DB_CONNECTION_ARGS** | Object | Optional connection arguments for the database, such as timeouts and SSL. +| Field | Type | Description +| **DB_CONNECTION_ARGS** | Object | Optional connection arguments for the database, such as timeouts and SSL/TLS. | **.autorollback**| Boolean | Whether to use thread-local connections. + -Should always be `true` -| **.threadlocals**| Boolean | Whether to use auto-rollback connections. + -Should always be `true` +Should always be `true` +| **.threadlocals**| Boolean | Whether to use auto-rollback connections. 
+ +Should always be `true` // TODO 36 max_connections, timeout, stale_timeout -// | {nbsp}{nbsp}{nbsp}.max_connections| Number | -// | {nbsp}{nbsp}{nbsp}.timeout | Number | -// | {nbsp}{nbsp}{nbsp}.stale_timeout | Number | +// | {nbsp}{nbsp}{nbsp}.max_connections| Number | +// | {nbsp}{nbsp}{nbsp}.timeout | Number | +// | {nbsp}{nbsp}{nbsp}.stale_timeout | Number | |=== [id="config-fields-postgres"] -=== PostgreSQL SSL connection arguments +=== PostgreSQL SSL/TLS connection arguments -With SSL, configuration depends on the database you are deploying. The following example shows a PostgreSQL SSL configuration: +With SSL/TLS, configuration depends on the database you are deploying. The following example shows a PostgreSQL SSL/TLS configuration: [source,yaml] ---- @@ -57,35 +57,35 @@ DB_CONNECTION_ARGS: sslrootcert: /path/to/cacert ---- -The `sslmode` option determines whether, or with, what priority a secure SSL TCP/IP connection will be negotiated with the server. There are six modes: +The `sslmode` option determines whether, or with, what priority a secure SSL/TLS TCP/IP connection will be negotiated with the server. There are six modes: -.SSL options +.SSL/TLS options [options="header"] -|=== +|=== |Mode |Description -| **disable** | Your configuration only tries non-SSL connections. -| **allow** | Your configuration first tries a non-SSL connection. Upon failure, tries an SSL connection. +| **disable** | Your configuration only tries non-SSL/TLS connections. +| **allow** | Your configuration first tries a non-SSL/TLS connection. Upon failure, tries an SSL/TLS connection. | **prefer** + -(Default) | Your configuration first tries an SSL connection. Upon failure, tries a non-SSL connection. -| **require** | Your configuration only tries an SSL connection. If a root CA file is present, it verifies the certificate in the same way as if verify-ca was specified. 
-| **verify-ca** | Your configuration only tries an SSL connection, and verifies that the server certificate is issued by a trusted certificate authority (CA). -| **verify-full** | Only tries an SSL connection, and verifies that the server certificate is issued by a trusted CA and that the requested server host name matches that in the certificate. -|=== +(Default) | Your configuration first tries an SSL/TLS connection. Upon failure, tries a non-SSL/TLS connection. +| **require** | Your configuration only tries an SSL/TLS connection. If a root CA file is present, it verifies the certificate in the same way as if verify-ca was specified. +| **verify-ca** | Your configuration only tries an SSL/TLS connection, and verifies that the server certificate is issued by a trusted certificate authority (CA). +| **verify-full** | Only tries an SSL/TLS connection, and verifies that the server certificate is issued by a trusted CA and that the requested server hostname matches that in the certificate. +|=== For more information on the valid arguments for PostgreSQL, see link:https://www.postgresql.org/docs/current/libpq-connect.html[Database Connection Control Functions]. [id="mysql-ssl-connection-arguments"] -=== MySQL SSL connection arguments +=== MySQL SSL/TLS connection arguments -The following example shows a sample MySQL SSL configuration: +The following example shows a sample MySQL SSL/TLS configuration: [source.yaml] ---- DB_CONNECTION_ARGS: - ssl: + ssl: ca: /path/to/cacert ---- -Information on the valid connection arguments for MySQL is available at link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[Connecting to the Server Using URI-Like Strings or Key-Value Pairs]. +Information on the valid connection arguments for MySQL is available at link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[Connecting to the Server Using URI-Like Strings or Key-Value Pairs]. 
diff --git a/modules/config-fields-dockerfile-build.adoc b/modules/config-fields-dockerfile-build.adoc index f89cb9ade..d0dbc3a86 100644 --- a/modules/config-fields-dockerfile-build.adoc +++ b/modules/config-fields-dockerfile-build.adoc @@ -1,7 +1,6 @@ -[[config-fields-dockerfile-build]] +[id="config-fields-dockerfile-build"] = Dockerfile build triggers fields - .Dockerfile build support [cols="3a,1a,2a",options="header"] |=== @@ -9,14 +8,13 @@ | **FEATURE_BUILD_SUPPORT** | Boolean | Whether to support Dockerfile build. + + -**Default:** False -| **SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD** | Number | If not None, the number of successive failures that -can occur before a build trigger is automatically disabled + +**Default:** `False` +| **SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD** | Number | If not set to `None`, the number of successive failures that can occur before a build trigger is automatically disabled. + + -**Default:** 100 -| **SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD** | Number | If not None, the number of successive internal errors that can occur before a build trigger is automatically disabled + +**Default:** `100` +| **SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD** | Number | If not set to `None`, the number of successive internal errors that can occur before a build trigger is automatically disabled + + -**Default:** 5 +**Default:** `5` |=== == GitHub build triggers @@ -25,20 +23,20 @@ can occur before a build trigger is automatically disabled + [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_GITHUB_BUILD** | Boolean | Whether to support GitHub build triggers + +| **FEATURE_GITHUB_BUILD** | Boolean | Whether to support GitHub build triggers. 
+ + -**Default:** False +**Default:** `False` | {nbsp} | {nbsp} | {nbsp} -| **GITHUB_TRIGGER_CONFIG** | Object | Configuration for using GitHub (Enterprise) for build triggers +| **GITHUB_TRIGGER_CONFIG** | Object | Configuration for using GitHub Enterprise for build triggers. | {nbsp}{nbsp}{nbsp}**.GITHUB_ENDPOINT** + -{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint for GitHub (Enterprise) + +{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint for GitHub Enterprise. + + **Example:** `https://github.com/` -| {nbsp}{nbsp}{nbsp}**.API_ENDPOINT** | String | The endpoint of the GitHub (Enterprise) API to use. Must be overridden for `github.com` + +| {nbsp}{nbsp}{nbsp}**.API_ENDPOINT** | String | The endpoint of the GitHub Enterprise API to use. Must be overridden for `github.com`. + + **Example**: `https://api.github.com/` | {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance; this cannot be shared with GITHUB_LOGIN_CONFIG. +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance; this cannot be shared with `GITHUB_LOGIN_CONFIG`. | {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + {nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. |=== @@ -49,15 +47,15 @@ can occur before a build trigger is automatically disabled + [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_BITBUCKET_BUILD** | Boolean | Whether to support Bitbucket build triggers + +| **FEATURE_BITBUCKET_BUILD** | Boolean | Whether to support Bitbucket build triggers. + + **Default:** False | {nbsp} | {nbsp} | {nbsp} -| **BITBUCKET_TRIGGER_CONFIG** | Object | Configuration for using BitBucket for build triggers +| **BITBUCKET_TRIGGER_CONFIG** | Object | Configuration for using BitBucket for build triggers. 
| {nbsp}{nbsp}{nbsp}**.CONSUMER_KEY** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered consumer key (client ID) for this Quay instance +{nbsp}{nbsp}{nbsp}(Required) | String | The registered consumer key (client ID) for this {productname} instance. | {nbsp}{nbsp}{nbsp}**.CONSUMER_SECRET** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered consumer secret (client secret) for this Quay instance +{nbsp}{nbsp}{nbsp}(Required) | String | The registered consumer secret (client secret) for this {productname} instance. |=== == GitLab build triggers @@ -66,15 +64,15 @@ can occur before a build trigger is automatically disabled + [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_GITLAB_BUILD** | Boolean | Whether to support GitLab build triggers + +| **FEATURE_GITLAB_BUILD** | Boolean | Whether to support GitLab build triggers. + + **Default:** False | {nbsp} | {nbsp} | {nbsp} -| **GITLAB_TRIGGER_CONFIG** | Object | Configuration for using Gitlab for build triggers +| **GITLAB_TRIGGER_CONFIG** | Object | Configuration for using Gitlab for build triggers. | {nbsp}{nbsp}{nbsp}**.GITLAB_ENDPOINT** + -{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint at which Gitlab (Enterprise) is running +{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint at which Gitlab Enterprise is running. | {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this Quay instance +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance. | {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this Quay instance +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. 
|=== diff --git a/modules/config-fields-features-misc.adoc b/modules/config-fields-features-misc.adoc deleted file mode 100644 index 02ee92735..000000000 --- a/modules/config-fields-features-misc.adoc +++ /dev/null @@ -1,55 +0,0 @@ -[[config-fields-features-misc]] -== Miscellaneous features - - -.Miscellaneous features -[cols="3a,1a,2a",options="header"] -|=== -| Field | Type | Description -| ** FEATURE_FIPS** | Boolean | If set to true, Quay will run using FIPS compliant hash functions + - + - **Default:** False -| **FEATURE_ADVERTISE_V2** | Boolean | Whether the v2/ endpoint is visible + - + - **Default:** True -| **FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL** | Boolean | Whether to allow retrieval of aggregated log counts + - + - **Default:** True - | **FEATURE_ANONYMOUS_ACCESS** | Boolean | Whether to allow anonymous users to browse and pull public repositories + - + -**Default:** True -| **FEATURE_APP_REGISTRY** | Boolean | Whether to enable support for App repositories + - + -**Default:** False -| **FEATURE_READONLY_APP_REGISTRY** | Boolean | Whether to App repositories are read-only + - + -**Default:** False -| **FEATURE_DIRECT_LOGIN** | Boolean | Whether users can directly login to the UI + - + -**Default:** True -| **FEATURE_LIBRARY_SUPPORT** | Boolean | Whether to allow for "namespace-less" repositories when pulling and pushing from Docker + - + -**Default:** True -| **FEATURE_PARTIAL_USER_AUTOCOMPLETE** | Boolean | If set to true, autocompletion will apply to partial usernames+ - + -**Default:** True -| **FEATURE_PERMANENT_SESSIONS** | Boolean | Whether sessions are permanent + - + -**Default:** True -| **FEATURE_PUBLIC_CATALOG** | Boolean | If set to true, the `_catalog` endpoint returns public repositories. Otherwise, only private repositories can be returned. + - + -**Default:** False -| **FEATURE_RATE_LIMITS** | Boolean | Whether to enable rate limits on API and registry endpoints. 
Setting FEATURE_RATE_LIMITS to `true` causes `nginx` to limit certain API calls to 30 per second. If that feature is not set, API calls are limited to 300 per second (effectively unlimited). + - + -**Default:** False -| **FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH** | Boolean | Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth + - + -**Default:** False -| **FEATURE_REQUIRE_TEAM_INVITE** | Boolean | Whether to require invitations when adding a user to a team + - + -**Default:** True - -|=== - - - diff --git a/modules/config-fields-footer.adoc b/modules/config-fields-footer.adoc new file mode 100644 index 000000000..20c205994 --- /dev/null +++ b/modules/config-fields-footer.adoc @@ -0,0 +1,40 @@ +:_content-type: CONCEPT +[id="config-fields-footer"] += UI footer configuration fields + +The following configuration fields have been added to the original (v1) UI. You can use these fields to customize the footer of your on-prem v1 UI. + +|=== +| Field | Type | Description + +|*FOOTER_LINKS* |Object | Enable customization of footer links in {productname}'s UI for on-prem installations. + +|*.TERMS_OF_SERVICE_URL* | String | Custom terms of service for on-prem installations. + + + +**Example:** + +`https://index.hr` + +|*.PRIVACY_POLICY_URL* | String | Custom privacy policy for on-prem installations. + + + +**Example:** + +`https://example.hr` +|*.SECURITY_URL* | String | Custom security page for on-prem installations. + + + +**Example:** + +`https://example.hr` + +| **.ABOUT_URL** | String | Custom about page for on-prem installations. 
+ + + +**Example:** + +`https://example.hr` +|=== + +.Example footer links YAML +[source,yaml] +---- +FOOTER_LINKS: + "TERMS_OF_SERVICE_URL": "https://www.index.hr" + "PRIVACY_POLICY_URL": "https://www.example.hr" + "SECURITY_URL": "https://www.example.hr" + "ABOUT_URL": "https://www.example.hr" +---- \ No newline at end of file diff --git a/modules/config-fields-general-clair.adoc b/modules/config-fields-general-clair.adoc index 24e2ccafd..2bda71416 100644 --- a/modules/config-fields-general-clair.adoc +++ b/modules/config-fields-general-clair.adoc @@ -2,7 +2,7 @@ [id="config-fields-required-clair"] = Clair general fields -The following section describes the general configuration fields available for a Clair deployment: +The following table describes the general configuration fields available for a Clair deployment. [cols="3a,1a,2a",options="header"] |=== @@ -19,3 +19,18 @@ Default: `:6060` | **.cert** | String | The TLS certificate to be used. Must be a full-chain certificate. |=== + +[discrete] +== Example configuration for general Clair fields + +The following example shows a Clair configuration. + +.Example configuration for general Clair fields +[source,yaml] +---- +# ... +http_listen_addr: 0.0.0.0:6060 +introspection_addr: 0.0.0.0:8089 +log_level: info +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-hcp.adoc b/modules/config-fields-hcp.adoc new file mode 100644 index 000000000..07e16e55a --- /dev/null +++ b/modules/config-fields-hcp.adoc @@ -0,0 +1,24 @@ +:_content-type: REFERENCE +[id="config-fields-hcp"] += Hitachi Content Platform object storage + +The following YAML shows a sample configuration using HCP for object storage. 
+ +.Example HCP storage configuration +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + hcp_us: + - RadosGWStorage + - access_key: + bucket_name: + hostname: + is_secure: true + secret_key: + storage_path: /datastorage/registry + signature_version: v4 +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- hcp_us +DISTRIBUTED_STORAGE_PREFERENCE: +- hcp_us +---- \ No newline at end of file diff --git a/modules/config-fields-helm-oci.adoc b/modules/config-fields-helm-oci.adoc index 189d0df21..bf8ec750a 100644 --- a/modules/config-fields-helm-oci.adoc +++ b/modules/config-fields-helm-oci.adoc @@ -1,28 +1,45 @@ -[[config-fields-helm-oci]] -= OCI and Helm configuration fields +:_content-type: REFERENCE +[id="config-fields-helm-oci"] += Helm configuration fields -Support for Helm is now supported under the `FEATURE_GENERAL_OCI_SUPPORT` property. If you need to explicitly enable the feature, for example, if it has previously been disabled or if you have upgraded from a version where it is not enabled by default, you need to add two properties in the Quay configuration to enable the use of OCI artifacts: - -[source,yaml] ----- -FEATURE_GENERAL_OCI_SUPPORT: true -FEATURE_HELM_OCI_SUPPORT: true ----- - - -.OCI and Helm configuration fields +.Helm configuration fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_GENERAL_OCI_SUPPORT** | Boolean | Enable support for OCI artifacts + - + -**Default:** True -| **FEATURE_HELM_OCI_SUPPORT** | Boolean | Enable support for Helm artifacts + +| **FEATURE_GENERAL_OCI_SUPPORT** | Boolean | Enable support for OCI artifacts. + + **Default:** True |=== -[IMPORTANT] -==== -As of {productname} 3.6, `FEATURE_HELM_OCI_SUPPORT` has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support. 
-==== +The following Open Container Initiative (OCI) artifact types are built into {productname} by default and are enabled through the *FEATURE_GENERAL_OCI_SUPPORT* configuration field: + +[cols="1a,3a,3a",options="header"] +|=== +| Field | Media Type | Supported content types + +| *Helm* | `application/vnd.cncf.helm.config.v1+json` | `application/tar+gzip`, `application/vnd.cncf.helm.chart.content.v1.tar+gzip` + +| *Cosign* | `application/vnd.oci.image.config.v1+json` | `application/vnd.dev.cosign.simplesigning.v1+json`, `application/vnd.dsse.envelope.v1+json` + +| *SPDX* | `application/vnd.oci.image.config.v1+json` | `text/spdx`, `text/spdx+xml`, `text/spdx+json` + +| *Syft* | `application/vnd.oci.image.config.v1+json` | `application/vnd.syft+json` + +| *CycloneDX* | `application/vnd.oci.image.config.v1+json` | `application/vnd.cyclonedx`, `application/vnd.cyclonedx+xml`, `application/vnd.cyclonedx+json` + +| *In-toto* | `application/vnd.oci.image.config.v1+json` | `application/vnd.in-toto+json` + +| *Unknown* | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego` | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego`, `application/vnd.cncf.openpolicyagent.data.layer.v1+json` + +|=== + +[id="configuring-helm-config"] +== Configuring Helm + +The following YAML is the example configuration when enabling Helm. + +.Helm YAML configuration +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +---- diff --git a/modules/config-fields-ibmcloudstorage.adoc b/modules/config-fields-ibmcloudstorage.adoc new file mode 100644 index 000000000..dc450b38b --- /dev/null +++ b/modules/config-fields-ibmcloudstorage.adoc @@ -0,0 +1,25 @@ +:_content-type: REFERENCE +[id="config-fields-ibmcloudstorage"] += IBM Cloud object storage + +The following YAML shows a sample configuration using IBM Cloud object storage. 
+ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - IBMCloudStorage #actual driver + - access_key: #parameters + secret_key: + bucket_name: + hostname: + is_secure: 'true' + port: '443' + storage_path: /datastorage/registry + maximum_chunk_size_mb: 100mb <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- default +DISTRIBUTED_STORAGE_PREFERENCE: +- default +---- +<1> Optional. Recommended to be set to `100mb`. \ No newline at end of file diff --git a/modules/config-fields-ipv6.adoc b/modules/config-fields-ipv6.adoc index f395f913b..d08407c55 100644 --- a/modules/config-fields-ipv6.adoc +++ b/modules/config-fields-ipv6.adoc @@ -1,12 +1,12 @@ -:_content-type: CONCEPT +:_content-type: REFERENCE [id="config-fields-ipv6"] -= IPv6 configuration field += IPv6 configuration field .IPv6 configuration field [cols="3a,1a,2a",options="header"] |=== -|Field | Type |Description -| **FEATURE_LISTEN_IP_VERSION** | String | Enables IPv4, IPv6, or dual-stack protocol family. This configuration field must be properly set, otherwise {productname} fails to start. +|Field | Type |Description +| **FEATURE_LISTEN_IP_VERSION** | String | Enables IPv4, IPv6, or dual-stack protocol family. This configuration field must be properly set, otherwise {productname} fails to start. 
*Default:* `IPv4` diff --git a/modules/config-fields-ldap.adoc b/modules/config-fields-ldap.adoc index 7b6fd0404..dae9d2806 100644 --- a/modules/config-fields-ldap.adoc +++ b/modules/config-fields-ldap.adoc @@ -1,16 +1,17 @@ -[[config-fields-ldap]] +:_content-type: REFERENCE +[id="config-fields-ldap"] = LDAP configuration fields .LDAP configuration -[cols="3a,1a,2a",options="header"] +[cols="2a,1a,2a",options="header"] |=== | Field | Type | Description | **AUTHENTICATION_TYPE** + -(Required) | String | Must be set to `LDAP` -| **FEATURE_TEAM_SYNCING** | Boolean | Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone) + +(Required) | String | Must be set to `LDAP`. +| **FEATURE_TEAM_SYNCING** | Boolean | Whether to allow for team membership to be synced from a backing group in the authentication engine (OIDC, LDAP, or Keystone). + + **Default:** `true` -| **FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP** | Boolean | If enabled, non-superusers can setup syncing on teams using LDAP + +| **FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP** | Boolean | If enabled, non-superusers can set up team synchronization. + + **Default:** `false` | **LDAP_ADMIN_DN** | String | The admin DN for LDAP authentication. @@ -22,7 +23,9 @@ | **LDAP_URI** | String | The LDAP URI. | **LDAP_USER_FILTER** | String | The user filter for LDAP authentication. | **LDAP_USER_RDN** | Array of String| The user RDN for LDAP authentication. -| **TEAM_RESYNC_STALE_TIME** | String | If team syncing is enabled for a team, how often to check its membership and resync if necessary + +| **LDAP_SECONDARY_USER_RDNS** | Array of String | Provide Secondary User Relative DNs if there are multiple Organizational Units where user objects are located. + +| **TEAM_RESYNC_STALE_TIME** | String | If team syncing is enabled for a team, how often to check its membership and resync if necessary. 
+ + **Pattern:** + `^[0-9]+(w\|m\|d\|h\|s)$` + @@ -37,47 +40,83 @@ With this field, administrators can add or remove superusers without having to u This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. +| **LDAP_GLOBAL_READONLY_SUPERUSER_FILTER** | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. Only works for those superusers defined with the `LDAP_SUPERUSER_FILTER` configuration field. + | **LDAP_RESTRICTED_USER_FILTER** | String | Subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. -This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. + +| **FEATURE_RESTRICTED_USERS** | Boolean | When set to `True` with `LDAP_RESTRICTED_USER_FILTER` active, only the listed users in the defined LDAP group are restricted. + +*Default:* `False` + +| **LDAP_TIMEOUT** |Integer | Specifies the time limit, in seconds, for LDAP operations. This limits the amount of time an LDAP search, bind, or other operation can take. Similar to the `-l` option in `ldapsearch`, it sets a client-side operation timeout. + + + +**Default:** `10` + +| **LDAP_NETWORK_TIMEOUT** |Integer | Specifies the time limit, in seconds, for establishing a connection to the LDAP server. This is the maximum time {productname} waits for a response during network operations, similar to the `-o nettimeout` option in `ldapsearch`. + + + +**Default:** `10` |=== [id="ldap-config-field-reference"] -== LDAP configuration field references +== LDAP configuration references -Use the following references to update your `config.yaml` file with the desired configuration field. 
+Use the following references to update your `config.yaml` file with the desired LDAP settings. [id="reference-ldap-user"] -=== Basic LDAP user configuration +=== Basic LDAP configuration + +Use the following reference for a basic LDAP configuration. [source,yaml] ---- --- -AUTHENTICATION_TYPE: LDAP +AUTHENTICATION_TYPE: LDAP <1> --- -LDAP_ADMIN_DN: uid=testuser,ou=Users,o=orgid,dc=jumpexamplecloud,dc=com -LDAP_ADMIN_PASSWD: samplepassword -LDAP_ALLOW_INSECURE_FALLBACK: false -LDAP_BASE_DN: - - o=orgid - - dc=example - - dc=com -LDAP_EMAIL_ATTR: mail -LDAP_UID_ATTR: uid -LDAP_URI: ldap://ldap.example.com:389 -LDAP_USER_RDN: - - ou=Users +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com <2> +LDAP_ADMIN_PASSWD: ABC123 <3> +LDAP_ALLOW_INSECURE_FALLBACK: false <4> +LDAP_BASE_DN: <5> + - dc=example + - dc=com +LDAP_EMAIL_ATTR: mail <6> +LDAP_UID_ATTR: uid <7> +LDAP_URI: ldap://.com <8> +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,dc=,dc=com) <9> +LDAP_USER_RDN: <10> + - ou=people +LDAP_SECONDARY_USER_RDNS: <11> + - ou= + - ou= + - ou= + - ou= ---- +<1> Required. Must be set to `LDAP`. +<2> Required. The admin DN for LDAP authentication. +<3> Required. The admin password for LDAP authentication. +<4> Required. Whether to allow SSL/TLS insecure fallback for LDAP authentication. +<5> Required. The base DN for LDAP authentication. +<6> Required. The email attribute for LDAP authentication. +<7> Required. The UID attribute for LDAP authentication. +<8> Required. The LDAP URI. +<9> Required. The user filter for LDAP authentication. +<10> Required. The user RDN for LDAP authentication. +<11> Optional. Secondary User Relative DNs if there are multiple Organizational Units where user objects are located. [id="reference-ldap-restricted-user"] === LDAP restricted user configuration +Use the following reference for an LDAP restricted user configuration. + [source,yaml] ---- ---- +# ... AUTHENTICATION_TYPE: LDAP ---- +# ... +FEATURE_RESTRICTED_USERS: true <1> +# ... 
LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com LDAP_ADMIN_PASSWD: ABC123 LDAP_ALLOW_INSECURE_FALLBACK: false @@ -89,23 +128,28 @@ LDAP_EMAIL_ATTR: mail LDAP_UID_ATTR: uid LDAP_URI: ldap://.com LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) -LDAP_RESTRICTED_USER_FILTER: (=) +LDAP_RESTRICTED_USER_FILTER: (=) <2> LDAP_USER_RDN: - ou= - o= - dc= - dc=com ---- +# ... ---- +<1> Must be set to `true` when configuring an LDAP restricted user. +<2> Configures specified users as restricted users. [id="reference-ldap-super-user"] -=== LDAP superuser configuration reference +=== LDAP superuser configuration reference + +Use the following reference for an LDAP superuser configuration. + [source,yaml] ---- ---- +# ... AUTHENTICATION_TYPE: LDAP ---- +# ... LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com LDAP_ADMIN_PASSWD: ABC123 LDAP_ALLOW_INSECURE_FALLBACK: false @@ -117,10 +161,12 @@ LDAP_EMAIL_ATTR: mail LDAP_UID_ATTR: uid LDAP_URI: ldap://.com LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) -LDAP_SUPERUSER_FILTER: (=) +LDAP_SUPERUSER_FILTER: (=) <1> LDAP_USER_RDN: - ou= - o= - dc= - dc=com ----- \ No newline at end of file +# ... +---- +<1> Configures specified users as superusers. diff --git a/modules/config-fields-legacy.adoc b/modules/config-fields-legacy.adoc index cdbe0a491..5b11cf1a4 100644 --- a/modules/config-fields-legacy.adoc +++ b/modules/config-fields-legacy.adoc @@ -1,7 +1,8 @@ -[[config-fields-legacy]] +:_content-type: REFERENCE +[id="config-fields-legacy"] = Legacy configuration fields -Some fields are deprecated or obsolete: +The following fields are deprecated or obsolete. .Legacy configuration fields [cols="3a,1a,2a",options="header"] @@ -15,15 +16,20 @@ Some fields are deprecated or obsolete: + **Example**: `<1.8.0` + **Default:** `<1.6.0` -| **DOCUMENTATION_ROOT** | String | Root URL for documentation links +| **DOCUMENTATION_ROOT** | String | Root URL for documentation links. 
This field is useful when {productname} is configured for disconnected environments to set an alternative, or allowlisted, documentation link. | **SECURITY_SCANNER_V4_NAMESPACE_WHITELIST** | String | The namespaces for which the security scanner should be enabled | **FEATURE_RESTRICTED_V1_PUSH** | Boolean | If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push + + -**Default:** True +**Default:** `False` | **V1_PUSH_WHITELIST** | Array of String | The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true +| *FEATURE_HELM_OCI_SUPPORT* | Boolean | Enable support for Helm artifacts. + + + +**Default:** `False` -|=== +|**ALLOWED_OCI_ARTIFACT_TYPES** | Object | The set of allowed OCI artifact MIME types and the associated layer types. + +|=== \ No newline at end of file diff --git a/modules/config-fields-mirroring.adoc b/modules/config-fields-mirroring.adoc index af03d8dfe..4dabe8db1 100644 --- a/modules/config-fields-mirroring.adoc +++ b/modules/config-fields-mirroring.adoc @@ -19,7 +19,7 @@ `openshift-quay-service` | **REPO_MIRROR_TLS_VERIFY** | Boolean | Require HTTPS and verify certificates of Quay registry during mirror. + + - **Default:** `false` + **Default:** `true` |**REPO_MIRROR_ROLLBACK** | Boolean | When set to `true`, the repository rolls back after a failed mirror attempt. diff --git a/modules/config-fields-misc.adoc b/modules/config-fields-misc.adoc index 8debc1b14..860d44fbb 100644 --- a/modules/config-fields-misc.adoc +++ b/modules/config-fields-misc.adoc @@ -1,7 +1,7 @@ -[[config-fields-misc]] +:_content-type: REFERENCE +[id="config-fields-misc"] = Miscellaneous configuration fields - .Miscellaneous configuration fields [cols="3a,1a,2a",options="header"] |=== @@ -54,199 +54,48 @@ | xref:reference-miscellaneous-v2-ui[**FEATURE_UI_V2**] | Boolean | When set, allows users to try the beta UI environment. 
*Default:* `True` -|=== - -//Consider removing in 3.9 - -[id="miscellaneous-config-field-reference"] -== Miscellaneous configuration field references - -Use the following references to update your `config.yaml` file with the desired configuration field. - -[id="reference-miscellaneous-v2-ui"] -=== v2 user interface configuration - -With `FEATURE_UI_V2` enabled, you can toggle between the current version of the user interface and the new version of the user interface. - -[IMPORTANT] -==== -* This UI is currently in beta and subject to change. In its current state, users can only create, view, and delete organizations, repositories, and image tags. -* When running {productname} in the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. -* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. In the new UI, {productname} uses the standard definition of megabyte (MB) to report image manifest sizes. -==== - -.Procedure - -. In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example: -+ -[source,yaml] ----- ---- -FEATURE_TEAM_SYNCING: false -FEATURE_UI_V2: true -FEATURE_USER_CREATION: true ---- ----- - -. Log in to your {productname} deployment. - -. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. 
Click the toggle button to set it to new UI, and then click *Use Beta Environment*, for example: -+ -image:38-ui-toggle.png[{productname} 3.8 UI toggle] - -[id="creating-new-organization-v2-ui"] -==== Creating a new organization in the {productname} 3.8 beta UI - -.Prerequisites - -* You have toggled your {productname} deployment to use the 3.8 beta UI. - -Use the following procedure to create an organization using the {productname} 3.8 beta UI. - -.Procedure - -. Click *Organization* in the navigation pane. - -. Click *Create Organization*. - -. Enter an *Organization Name*, for example, `testorg`. - -. Click *Create*. - -Now, your example organization should populate under the *Organizations* page. - -[id="deleting-organization-v2"] -==== Deleting an organization using the {productname} 3.8 beta UI - -Use the following procedure to delete an organization using the {productname} 3.8 beta UI. - -.Procedure - -. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`. - -. Click the *More Actions* drop down menu. - -. Click *Delete*. -+ -[NOTE] -==== -On the *Delete* page, there is a *Search* input box. With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm said organization is marked for deletion. -==== - -. Confirm that you want to permanently delete the organization by typing *confirm* in the box. - -. Click *Delete*. - -After deletion, you are returned to the *Organizations* page. - -[NOTE] -==== -You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*. 
-==== - -[id="creating-new-repository-v2"] -==== Creating a new repository using the {productname} 3.8 beta UI - -Use the following procedure to create a repository using the {productname} 3.8 beta UI. - -.Procedure - -. Click *Repositories* on the navigation pane. -. Click *Create Repository*. - -. Select a namespace, for example, *quayadmin*, and then enter a *Repository name*, for example, `testrepo`. - -. Click *Create*. - -Now, your example repository should populate under the *Repositories* page. - -[id="deleting-repository-v2"] -==== Deleting a repository using the {productname} 3.8 beta UI - -.Prerequisites - -* You have created a repository. - -.Procedure - -. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. - -. Click the *More Actions* drop-down menu. - -. Click *Delete*. -+ -[NOTE] -==== -If desired, you could click *Make Public* or *Make Private*. -==== - -. Type *confirm* in the box, and then click *Delete*. - -. After deletion, you are returned to the *Repositories* page. - -[id="pushing-image-v2"] -==== Pushing an image to the {productname} 3.8 beta UI - -Use the following procedure to push an image to the {productname} 3.8 beta UI. - -.Procedure - -. Pull a sample image from an external registry: -+ -[source,terminal] ----- -$ podman pull busybox ----- - -. Tag the image: -+ -[source,terminal] ----- -$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test ----- - -. Push the image to your {productname} registry: -+ -[source,terminal] ----- -$ podman push quay-server.example.com/quayadmin/busybox:test ----- - -. Navigate to the *Repositories* page on the {productname} UI and ensure that your image has been properly pushed. - -. You can check the security details by selecting your image tag, and then navigating to the *Security Report* page. 
- -[id="deleting-image-v2"] -==== Deleting an image using the {productname} 3.8 beta UI - -Use the following procedure to delete an image using the{productname} 3.8 beta UI. - -.Prerequisites - -* You have pushed an image to your {productname} registry. - -.Procedure +| **FEATURE_REQUIRE_TEAM_INVITE** | Boolean | Whether to require invitations when adding a user to a team + + + +**Default:** True -. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. +| **FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH** | Boolean | Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth + + + +**Default:** False -. Click the *More Actions* drop-down menu. +| **FEATURE_RATE_LIMITS** | Boolean | Whether to enable rate limits on API and registry endpoints. Setting FEATURE_RATE_LIMITS to `true` causes `nginx` to limit certain API calls to 30 per second. If that feature is not set, API calls are limited to 300 per second (effectively unlimited). + + + +**Default:** False -. Click *Delete*. -+ -[NOTE] -==== -If desired, you could click *Make Public* or *Make Private*. -==== +| **FEATURE_FIPS** | Boolean | If set to true, {productname} will run using FIPS-compliant hash functions + + + + **Default:** False -. Type *confirm* in the box, and then click *Delete*. +| **FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL** | Boolean | Whether to allow retrieval of aggregated log counts + + + + **Default:** True + | **FEATURE_ANONYMOUS_ACCESS** | Boolean | Whether to allow anonymous users to browse and pull public repositories + + + +**Default:** True -. After deletion, you are returned to the *Repositories* page. 
+| **FEATURE_DIRECT_LOGIN** | Boolean | Whether users can directly login to the UI + + + +**Default:** True +| **FEATURE_LIBRARY_SUPPORT** | Boolean | Whether to allow for "namespace-less" repositories when pulling and pushing from Docker + + + +**Default:** True +| **FEATURE_PARTIAL_USER_AUTOCOMPLETE** | Boolean | If set to true, autocompletion will apply to partial usernames+ + + +**Default:** True +| **FEATURE_PERMANENT_SESSIONS** | Boolean | Whether sessions are permanent + + + +**Default:** True +| **FEATURE_PUBLIC_CATALOG** | Boolean | If set to true, the `_catalog` endpoint returns public repositories. Otherwise, only private repositories can be returned. + + + +**Default:** False -[id="enabling-legacy-ui"] -==== Enabling the {productname} legacy UI +|*DISABLE_PUSHES* |Boolean | Disables pushes of new content to the registry while retaining all other functionality. Differs from `read-only` mode because database is not set as `read-only`. When `DISABLE_PUSHES` is set to `true`, the {productname} garbage collector is disabled. As a result, when `PERMANENTLY_DELETE_TAGS` is enabled, using the {productname} UI to permanently delete a tag does not result in the immediate deletion of a tag. Instead, the image stays in the backend storage until `DISABLE_PUSHES` is set to `false`, which re-enables the garbage collector. {productname} administrators should be aware of this caveat when using `DISABLE_PUSHES` and `PERMANENTLY_DELETE_TAGS` together. + + + + **Default:** False -. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to *Current UI*. 
-+ -image:38-ui-toggle.png[{productname} 3.8 UI toggle] - +|=== \ No newline at end of file diff --git a/modules/config-fields-modelcard-rendering.adoc b/modules/config-fields-modelcard-rendering.adoc new file mode 100644 index 000000000..eaa30b30c --- /dev/null +++ b/modules/config-fields-modelcard-rendering.adoc @@ -0,0 +1,28 @@ +[id="config-fields-model-card-rendering"] +== Model card rendering + +The following configuration fields have been added to support model card rendering on the v2 UI. + +|=== +| Field | Type | Description + +|*FEATURE_UI_MODELCARD* |Boolean | Enables *Model Card* image tab in UI. Defaults to `true`. +|*UI_MODELCARD_ARTIFACT_TYPE* | String | Defines the model card artifact type. +|*UI_MODELCARD_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. +|*UI_MODELCARD_LAYER_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. +|=== + +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true <1> +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel <2> +UI_MODELCARD_ANNOTATION: <3> + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: <4> + org.opencontainers.image.title: README.md +---- +<1> Enables the *Model Card* image tab in the UI. +<2> Defines the model card artifact type. In this example, the artifact type is `application/x-mlmodel`. +<3> Optional. If an image does not have an `artifactType` defined, this field is checked at the manifest level. If a matching annotation is found, the system then searches for a layer with an annotation matching `UI_MODELCARD_LAYER_ANNOTATION`. +<4> Optional. If an image has an `artifactType` defined and multiple layers, this field is used to locate the specific layer containing the model card. 
\ No newline at end of file diff --git a/modules/config-fields-nested-repositories.adoc b/modules/config-fields-nested-repositories.adoc index 5f68d2e06..64a3126bc 100644 --- a/modules/config-fields-nested-repositories.adoc +++ b/modules/config-fields-nested-repositories.adoc @@ -1,12 +1,8 @@ -[[config-fields-nested-repositories]] +:_content-type: REFERENCE +[id="config-fields-nested-repositories"] = Nested repositories configuration fields -With {productname} 3.6, support for nested repository path names has been added under the `FEATURE_EXTENDED_REPOSITORY_NAMES` property. This optional configuration is added to the config.yaml by default. Enablement allows the use of `/` in repository names. - -[source,yaml] ----- -FEATURE_EXTENDED_REPOSITORY_NAMES: true ----- +Support for nested repository path names has been added under the `FEATURE_EXTENDED_REPOSITORY_NAMES` property. This optional configuration is added to the config.yaml by default. Enablement allows the use of `/` in repository names. .OCI and nested repositories configuration fields [cols="3a,1a,2a",options="header"] @@ -17,3 +13,9 @@ FEATURE_EXTENDED_REPOSITORY_NAMES: true **Default:** True |=== + +.OCI and nested repositories configuration example +[source,yaml] +---- +FEATURE_EXTENDED_REPOSITORY_NAMES: true +---- diff --git a/modules/config-fields-netapp-ontap-s3.adoc b/modules/config-fields-netapp-ontap-s3.adoc new file mode 100644 index 000000000..cbfcf2003 --- /dev/null +++ b/modules/config-fields-netapp-ontap-s3.adoc @@ -0,0 +1,24 @@ +:_content-type: REFERENCE +[id="config-fields-netapp-ontap"] += NetApp ONTAP S3 object storage + +The following YAML shows a sample configuration using NetApp ONTAP S3. 
+ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + local_us: + - RadosGWStorage + - access_key: + bucket_name: + hostname: + is_secure: true + port: + secret_key: + storage_path: /datastorage/registry + signature_version: v4 +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- local_us +DISTRIBUTED_STORAGE_PREFERENCE: +- local_us +---- diff --git a/modules/config-fields-nutanix.adoc b/modules/config-fields-nutanix.adoc new file mode 100644 index 000000000..e7de3b8a3 --- /dev/null +++ b/modules/config-fields-nutanix.adoc @@ -0,0 +1,22 @@ +:_content-type: REFERENCE +[id="config-fields-nutanix"] += Nutanix object storage + +The following YAML shows a sample configuration using Nutanix object storage. + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + nutanixStorage: #storage config name + - RadosGWStorage #actual driver + - access_key: access_key_here #parameters + secret_key: secret_key_here + bucket_name: bucket_name_here + hostname: hostname_here + is_secure: 'true' + port: '443' + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: #must contain name of the storage config + - nutanixStorage +---- \ No newline at end of file diff --git a/modules/config-fields-oauth.adoc b/modules/config-fields-oauth.adoc index e1c0c96a3..9cf92fa36 100644 --- a/modules/config-fields-oauth.adoc +++ b/modules/config-fields-oauth.adoc @@ -1,4 +1,5 @@ -[[config-fields-oauth]] +:_content-type: REFERENCE +[id="config-fields-oauth"] = OAuth configuration fields .OAuth fields @@ -6,9 +7,12 @@ |=== | Field | Type | Description | **DIRECT_OAUTH_CLIENTID_WHITELIST** | Array of String | A list of client IDs for **Quay-managed** applications that are allowed to perform direct OAuth approval without user approval. -|=== +|*FEATURE_ASSIGN_OAUTH_TOKEN* | Boolean| Allows organization administrators to assign OAuth tokens to other users. 
+ +|=== +[id="github-oauth-config-fields"] == GitHub OAuth configuration fields .GitHub OAuth fields @@ -24,37 +28,37 @@ + **Example:** `https://api.github.com/` | {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance; cannot be shared with GITHUB_TRIGGER_CONFIG + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance; cannot be shared with `GITHUB_TRIGGER_CONFIG`. + + **Example:** `0e8dbe15c4c7630b6780` | {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. + + **Example:** `e4a58ddd3d7408b7aec109e85564a0d153d3e846` | {nbsp}{nbsp}{nbsp}**.GITHUB_ENDPOINT** + -{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint for GitHub (Enterprise) + +{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint for GitHub (Enterprise). + + **Example**: `https://github.com/` | {nbsp}{nbsp}{nbsp}**.ORG_RESTRICT** | Boolean | If true, only users within the organization whitelist can login using this provider. |=== - +[id="google-oauth-config-fields"] == Google OAuth configuration fields .Google OAuth fields [cols="3a,1a,2a",options="header"] |=== | Field | Type | Description -| **FEATURE_GOOGLE_LOGIN** | Boolean | Whether Google login is supported + +| **FEATURE_GOOGLE_LOGIN** | Boolean | Whether Google login is supported. + + **Default: `False` -| **GOOGLE_LOGIN_CONFIG** | Object | Configuration for using Google for external authentication +| **GOOGLE_LOGIN_CONFIG** | Object | Configuration for using Google for external authentication. | {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance. 
+ + **Example:** `0e8dbe15c4c7630b6780` | {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + -{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. + + -**Example:** e4a58ddd3d7408b7aec109e85564a0d153d3e846 +**Example:** `e4a58ddd3d7408b7aec109e85564a0d153d3e846` |=== \ No newline at end of file diff --git a/modules/config-fields-optional-intro.adoc b/modules/config-fields-optional-intro.adoc index a1bbb2299..e995947ba 100644 --- a/modules/config-fields-optional-intro.adoc +++ b/modules/config-fields-optional-intro.adoc @@ -2,14 +2,15 @@ [id="config-fields-optional-intro"] = Optional configuration fields -Optional fields for {productname} can be found in the following sections: +Optional fields for {productname} can be found in the following sections: * xref:config-fields-basic[Basic configuration] * xref:config-fields-ssl[SSL] * xref:config-fields-ldap[LDAP] * xref:config-fields-mirroring[Repository mirroring] +* xref:config-fields-quota-management[Quota management] * xref:config-fields-scanner[Security scanner] -* xref:config-fields-helm-oci[OCI and Helm] +* xref:config-fields-helm-oci[Helm] * xref:config-fields-actionlog[Action log] * xref:config-fields-build-logs[Build logs] * xref:config-fields-dockerfile-build[Dockerfile build] @@ -23,6 +24,6 @@ Optional fields for {productname} can be found in the following sections: * xref:config-fields-jwt[JWT] * xref:config-fields-app-tokens[App tokens] * xref:config-fields-misc[Miscellaneous] -* xref:config-fields-legacy[Legacy options] * xref:config-fields-v2-ui[User interface v2] -* xref:config-fields-ipv6[IPv6 configuration field] \ No newline at end of file +* xref:config-fields-ipv6[IPv6 configuration field] +* xref:config-fields-legacy[Legacy options] \ No newline at end of file diff --git a/modules/config-fields-overview.adoc b/modules/config-fields-overview.adoc index 
12fb2947a..4e9bfa022 100644 --- a/modules/config-fields-overview.adoc +++ b/modules/config-fields-overview.adoc @@ -18,13 +18,48 @@ $ clair -conf ./path/to/config.yaml -mode matcher The aforementioned commands each start two Clair nodes using the same configuration file. One runs the indexing facilities, while other runs the matching facilities. +If you are running Clair in `combo` mode, you must supply the indexer, matcher, and notifier configuration blocks in the configuration. + +[id="information-using-clair-proxy-environment"] +== Information about using Clair in a proxy environment + Environment variables respected by the Go standard library can be specified if needed, for example: * `HTTP_PROXY` -* `HTTPS_PROXY` ++ +[source,terminal] +---- +$ export HTTP_PROXY=http://:@: +---- +* `HTTPS_PROXY`. ++ +[source,terminal] +---- +$ export HTTPS_PROXY=https://:@: +---- * `SSL_CERT_DIR` ++ +[source,terminal] +---- +$ export SSL_CERT_DIR=//// +---- +* `NO_PROXY` ++ +[source,terminal] +---- +$ export NO_PROXY= +---- -If you are running Clair in `combo` mode, you must supply the indexer, matcher, and notifier configuration blocks in the configuration. +If you are using a proxy server in your environment with Clair's updater URLs, you must identify which URL needs to be added to the proxy allowlist to ensure that Clair can access them unimpeded. For example, the `osv` updater requires access to `\https://osv-vulnerabilities.storage.googleapis.com` to fetch ecosystem data dumps. In this scenario, the URL must be added to the proxy allowlist. For a full list of updater URLs, see "Clair updater URLs". 
+ +You must also ensure that the standard Clair URLs are added to the proxy allowlist: + +* `\https://search.maven.org/solrsearch/select` +* `\https://catalog.redhat.com/api/containers/` +* `\https://access.redhat.com/security/data/metrics/repository-to-cpe.json` +* `\https://access.redhat.com/security/data/metrics/container-name-repos-map.json` + +When configuring the proxy server, take into account any authentication requirements or specific proxy settings needed to enable seamless communication between Clair and these URLs. By thoroughly documenting and addressing these considerations, you can ensure that Clair functions effectively while routing its updater traffic through the proxy. [id="config-fields-clair-reference"] == Clair configuration reference @@ -40,7 +75,7 @@ tls: {} indexer: connstring: "" scanlock_retry: 0 - layer_scan_concurrency: 0 + layer_scan_concurrency: 5 migrations: false scanner: {} airgap: false diff --git a/modules/config-fields-proxy-cache.adoc b/modules/config-fields-proxy-cache.adoc new file mode 100644 index 000000000..be70b0001 --- /dev/null +++ b/modules/config-fields-proxy-cache.adoc @@ -0,0 +1,13 @@ +:_content-type: REFERENCE +[id="config-fields-proxy-cache"] += Proxy cache configuration fields + +.Proxy configuration +[cols="3a,1a,2a",options="header"] +|=== +|Field |Type |Description +|**FEATURE_PROXY_CACHE** | Boolean | Enables {productname} to act as a pull through cache for upstream registries. 
+ +*Default*: `false` + +|=== \ No newline at end of file diff --git a/modules/config-fields-quota-management.adoc b/modules/config-fields-quota-management.adoc new file mode 100644 index 000000000..1da719ae8 --- /dev/null +++ b/modules/config-fields-quota-management.adoc @@ -0,0 +1,47 @@ +:_content-type: REFERENCE +[id="config-fields-quota-management"] += Quota management configuration fields + +.Quota management configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_QUOTA_MANAGEMENT** | Boolean | Enables configuration, caching, and validation for quota management feature. + + **Default:** `False` + +| **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** | String | Enables system default quota reject byte allowance for all organizations. + +By default, no limit is set. + +| **QUOTA_BACKFILL** | Boolean | Enables the quota backfill worker to calculate the size of pre-existing blobs. + +**Default**: `True` + +|**QUOTA_TOTAL_DELAY_SECONDS** |String | The time delay for starting the quota backfill. Rolling deployments can cause incorrect totals. This field *must* be set to a time longer than it takes for the rolling deployment to complete. + +**Default**: `1800` + +|**PERMANENTLY_DELETE_TAGS** |Boolean | Enables functionality related to the removal of tags from the time machine window. + +**Default**: `False` + +|**RESET_CHILD_MANIFEST_EXPIRATION** |Boolean |Resets the expirations of temporary tags targeting the child manifests. With this feature set to `True`, child manifests are immediately garbage collected. + +**Default**: `False` +|=== + +[id="suggested-management-config-settings-39"] +== Example quota management configuration + +The following YAML is the suggested configuration when enabling quota management. 
+ +.Quota management YAML configuration +[source,yaml] +---- +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_GARBAGE_COLLECTION: true +PERMANENTLY_DELETE_TAGS: true +QUOTA_TOTAL_DELAY_SECONDS: 1800 +RESET_CHILD_MANIFEST_EXPIRATION: true +---- \ No newline at end of file diff --git a/modules/config-fields-quota.adoc b/modules/config-fields-quota.adoc deleted file mode 100644 index d4ddfca72..000000000 --- a/modules/config-fields-quota.adoc +++ /dev/null @@ -1,32 +0,0 @@ -[[config-fields-quota]] -= Quota management configuration - -Quota management is now supported under the `FEATURE_QUOTA_MANAGEMENT` property and is turned off by default. To enable quota management, set the feature flag in your `config.yaml` to `true`: - -[source,yaml] ----- -FEATURE_QUOTA_MANAGEMENT: true ----- - - -[NOTE] -==== -In {productname} 3.7, superuser privileges are required to create, update and delete quotas. While quotas can be set for users as well as organizations, you cannot reconfigure the _user_ quota using the {productname} UI and you must use the API instead. -==== - - -== Default quota - -To specify a system-wide default storage quota that is applied to every organization and user, use the *DEFAULT_SYSTEM_REJECT_QUOTA_BYTES* configuration flag. - -.Default quota configuration -[cols="3a,1a,2a",options="header"] -|=== -| Field | Type | Description -| **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** | String | The quota size to apply to all organizations and users. + - + -By default, no limit is set. -|=== - - -If you configure a specific quota for an organization or user, and then delete that quota, the system-wide default quota will apply if one has been set. Similarly, if you have configured a specific quota for an organization or user, and then modify the system-wide default quota, the updated system-wide default will override any specific settings. 
\ No newline at end of file diff --git a/modules/config-fields-redis.adoc b/modules/config-fields-redis.adoc index 754b50423..84dd1e8e4 100644 --- a/modules/config-fields-redis.adoc +++ b/modules/config-fields-redis.adoc @@ -22,14 +22,11 @@ The following build logs configuration fields are available for Redis deployment (Required)| Number | The port at which Redis is accessible. + **Example:** + `6379` -|**.password** | String | The port at which Redis is accessible. + +|**.password** | String | The password to connect to the Redis instance. + **Example:** + `strongpassword` -|**.port** + -(Required)| Number | The port at which Redis is accessible. + -**Example:** + -`6379` -| **ssl** | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. +| **.ssl** + +(Optional) | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. |=== [id="user-event-fields-redis"] @@ -51,29 +48,55 @@ The following user event fields are available for Redis deployments: (Required)| Number | The port at which Redis is accessible. + **Example:** + `6379` -|**.password** | String | The port at which Redis is accessible. + +|**.password** | String | The password to connect to the Redis instance. + **Example:** + `strongpassword` -| **ssl** | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. +| **.ssl** | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. +| **.ssl_keyfile** + +(Optional) | String | The name of the key database file, which houses the client certificate to be used. + +**Example:** + +`ssl_keyfile: /path/to/server/privatekey.pem` +| **.ssl_certfile** + +(Optional) | String | Used for specifying the file path of the SSL certificate. 
+ +**Example:** + +`ssl_certfile: /path/to/server/certificate.pem` +| **.ssl_cert_reqs** + +(Optional) | String | Used to specify the level of certificate validation to be performed during the SSL/TLS handshake. + +**Example:** + +`ssl_cert_reqs: CERT_REQUIRED` +| **.ssl_ca_certs** + +(Optional) | String | Used to specify the path to a file containing a list of trusted Certificate Authority (CA) certificates. + +**Example:** + +`ssl_ca_certs: /path/to/ca_certs.pem` +| **.ssl_ca_data** + +(Optional) | String | Used to specify a string containing the trusted CA certificates in PEM format. + +**Example:** + +`ssl_ca_data: ` +| **.ssl_check_hostname ** + +(Optional) | Boolean | Used when setting up an SSL/TLS connection to a server. It specifies whether the client should check that the hostname in the server's SSL/TLS certificate matches the hostname of the server it is connecting to. + +**Example:** + +`ssl_check_hostname: true` |=== [id="example-redis-configuration"] == Example Redis configuration -The following YAML shows a sample configuration using Redis: +The following YAML shows a sample configuration using Redis with optional SSL/TLS fields: [source,yaml] ---- BUILDLOGS_REDIS: - host: quay-server.example.com - password: strongpassword - port: 6379 - ssl: true + host: quay-server.example.com + password: strongpassword + port: 6379 + ssl: true + USER_EVENTS_REDIS: - host: quay-server.example.com - password: strongpassword - port: 6379 - ssl: true + host: quay-server.example.com + password: strongpassword + port: 6379 + ssl: true + ssl_*: ---- [NOTE] diff --git a/modules/config-fields-required-general.adoc b/modules/config-fields-required-general.adoc index 33c7933d9..ed5ef53df 100644 --- a/modules/config-fields-required-general.adoc +++ b/modules/config-fields-required-general.adoc @@ -28,11 +28,12 @@ One of `http`, `https` + **Example:** + `quay-server.example.com` | **DATABASE_SECRET_KEY** + -(Required) | String | Key used to encrypt sensitive fields within the 
database. This value should never be changed once set, otherwise all reliant fields, for example, repository mirror username and password configurations, are invalidated. +(Required) | String | Key used to encrypt sensitive fields within the database. This value should never be changed once set, otherwise all reliant fields, for example, repository mirror username and password configurations, are invalidated. + +This value is set automatically by the {productname} Operator for Operator-based deployments. For standalone deployments, administrators can provide their own key using Open SSL or a similar tool. Key length should not exceed 63 characters. | **SECRET_KEY** + -(Required) | String | Key used to encrypt sensitive fields within the database and at run time. This value should never be changed once set, otherwise all reliant fields, for example, encrypted password credentials, are invalidated. +(Required) | String | Key used to encrypt the session cookie and the CSRF token needed for correct interpretation of the user session. The value should not be changed when set. Should be persistent across all {productname} instances. If not persistent across all instances, login failures and other errors related to session persistence might occur. | **SETUP_COMPLETE** + -(Required) | Boolean | This is an artefact left over from earlier versions of the software and currently it **must** be specified with a value of `true`. +(Required) | Boolean | This is an artifact left over from earlier versions of the software and currently it **must** be specified with a value of `true`. 
|=== diff --git a/modules/config-fields-required-intro.adoc b/modules/config-fields-required-intro.adoc index 11d61efef..6a777c90f 100644 --- a/modules/config-fields-required-intro.adoc +++ b/modules/config-fields-required-intro.adoc @@ -5,8 +5,8 @@ The fields required to configure {productname} are covered in the following sections: -* xref:config-fields-required-general[General required fields] -* xref:config-fields-redis[Storage for images] -* xref:config-fields-db[Database for metadata] -* xref:config-fields-redis[Redis for build logs and user events] -* xref:config-fields-tag-expiration[Tag expiration options] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-required-general[General required fields] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-storage[Storage for images] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-db[Database for metadata] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-redis[Redis for build logs and user events] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-tag-expiration[Tag expiration options] diff --git a/modules/config-fields-robot-account.adoc b/modules/config-fields-robot-account.adoc new file mode 100644 index 000000000..b01799386 --- /dev/null +++ b/modules/config-fields-robot-account.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="config-fields-robot-account"] += Robot account configuration fields + +.Robot account configuration fields +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description + +|**ROBOTS_DISALLOW** |Boolean |When set to `true`, robot accounts are prevented from all 
interactions, as well as from being created + + +*Default*: `False` +|=== diff --git a/modules/config-fields-scanner.adoc b/modules/config-fields-scanner.adoc index 261c0e317..03cd5ee95 100644 --- a/modules/config-fields-scanner.adoc +++ b/modules/config-fields-scanner.adoc @@ -1,8 +1,7 @@ -[[config-fields-scanner]] +:_content-type: REFERENCE +[id="config-fields-scanner"] = Security scanner configuration fields - - .Security scanner configuration [cols="3a,1a,2a",options="header"] |=== @@ -22,9 +21,6 @@ **Example:** + `http://192.168.99.101:6060` | **SECURITY_SCANNER_V4_PSK** | String | The generated pre-shared key (PSK) for Clair -| **SECURITY_SCANNER_INDEXING_INTERVAL** | Number | The number of seconds between indexing intervals in the security scanner + - + -**Default:** 30 // TODO 36 Check that SECURITY_SCANNER_NOTIFICATIONS can be dropped // | **SECURITY_SCANNER_NOTIFICATIONS** | String | | **SECURITY_SCANNER_ENDPOINT** | String | The endpoint for the V2 security scanner + @@ -34,14 +30,52 @@ + **Example:** + `http://192.168.99.100:6060` -| **SECURITY_SCANNER_INDEXING_INTERVAL** | String | This parameter is used to determine the number of seconds between indexing intervals in the security scanner. When indexing is triggered, {productname} will query its database for manifests that must be indexed by Clair. These include manifests that have not yet been indexed and manifests that previously failed indexing. -|=== +| **SECURITY_SCANNER_INDEXING_INTERVAL** | Integer | This parameter is used to determine the number of seconds between indexing intervals in the security scanner. When indexing is triggered, {productname} will query its database for manifests that must be indexed by Clair. These include manifests that have not yet been indexed and manifests that previously failed indexing. + + + +**Default:** 30 + +| **FEATURE_SECURITY_SCANNER_NOTIFY_ON_NEW_INDEX** | Boolean | Whether to allow sending notifications about vulnerabilities for new pushes. 
+ + +**Default**: `True` +| **SECURITY_SCANNER_V4_MANIFEST_CLEANUP** | Boolean | Whether the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. + + +**Default**: `True` -The following is a special case for re-indexing: +| *NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX* | String | Set minimal security level for new notifications on detected vulnerabilities. Avoids creation of large number of notifications after first index. If not defined, defaults to `High`. Available options include `Critical`, `High`, `Medium`, `Low`, `Negligible`, and `Unknown`. + +| *SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE* | String | The maximum layer size allowed for indexing. If the layer size exceeds the configured size, the {productname} UI returns the following message: `The manifest for this tag has layer(s) that are too large to index by the Quay Security Scanner`. The default is `8G`, and the maximum recommended is `10G`. Accepted values are `B`, `K`, `M`, `T`, and `G`. + + + **Default**: `8G` +|=== + +[id="reindexing-clair-v4"] +== Re-indexing with Clair v4 When Clair v4 indexes a manifest, the result should be deterministic. For example, the same manifest should produce the same index report. This is true until the scanners are changed, as using different scanners will produce different information relating to a specific manifest to be returned in the report. Because of this, Clair v4 exposes a state representation of the indexing engine (`/indexer/api/v1/index_state`) to determine whether the scanner configuration has been changed. -{productname} leverages this index state by saving it to the index report when parsing to Quay's database. If this state has changed since the manifest was previously scanned, Quay will attempt to re-index that manifest during the periodic indexing process. +{productname} leverages this index state by saving it to the index report when parsing to Quay's database. 
If this state has changed since the manifest was previously scanned, {productname} will attempt to re-index that manifest during the periodic indexing process. + +By default this parameter is set to 30 seconds. Users might decrease the time if they want the indexing process to run more frequently, for example, if they did not want to wait 30 seconds to see security scan results in the UI after pushing a new tag. Users can also change the parameter if they want more control over the request pattern to Clair and the pattern of database operations being performed on the {productname} database. + +[id="example-security-scanner-config"] +== Example security scanner configuration + +The following YAML is the suggested configuration when enabling the security scanner feature. -By default this parameter is set to 30 seconds. Users might decrease the time if they want the indexing process to run more frequently, for example, if they did not want to wait 30 seconds to see security scan results in the UI after pushing a new tag. Users can also change the parameter if they want more control over the request pattern to Clair and the pattern of database operations being performed on the Quay database. +.Security scanner YAML configuration +[source,yaml] +---- +FEATURE_SECURITY_NOTIFICATIONS: true +FEATURE_SECURITY_SCANNER: true +FEATURE_SECURITY_SCANNER_NOTIFY_ON_NEW_INDEX: true +... +SECURITY_SCANNER_INDEXING_INTERVAL: 30 +SECURITY_SCANNER_V4_MANIFEST_CLEANUP: true +SECURITY_SCANNER_V4_ENDPOINT: http://quay-server.example.com:8081 +SECURITY_SCANNER_V4_PSK: MTU5YzA4Y2ZkNzJoMQ== +SERVER_HOSTNAME: quay-server.example.com +SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE: 8G <1> +... +---- +<1> Recommended maximum is `10G`. 
\ No newline at end of file diff --git a/modules/config-fields-session-logout.adoc b/modules/config-fields-session-logout.adoc new file mode 100644 index 000000000..9e96665c2 --- /dev/null +++ b/modules/config-fields-session-logout.adoc @@ -0,0 +1,32 @@ +:_content-type: CONCEPT +[id="config-fields-session-logout"] += Session timeout configuration field + +The following configuration field relies on on the Flask API configuration field of the same name. + +.Session logout configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **PERMANENT_SESSION_LIFETIME** | Integer | A `timedelta` which is used to set the expiration date of a permanent session. The default is 31 days, which makes a permanent session survive for roughly one month. + +*Default:* `2678400` +|=== + + +[id="suggested-permanent-session-lifetime-config"] +== Example session timeout configuration + +The following YAML is the suggest configuration when enabling session lifetime. + +[IMPORTANT] +==== +Altering session lifetime is not recommended. Administrators should be aware of the allotted time when setting a session timeout. If you set the time too early, it might interrupt your workflow. +==== + +.Session timeout YAML configuration +[source,yaml] +---- +PERMANENT_SESSION_LIFETIME: 3000 +---- + diff --git a/modules/config-fields-ssl.adoc b/modules/config-fields-ssl.adoc index ccf4a9402..4be30c688 100644 --- a/modules/config-fields-ssl.adoc +++ b/modules/config-fields-ssl.adoc @@ -1,4 +1,5 @@ -[[config-fields-ssl]] +:_content-type: CONCEPT +[id="config-fields-ssl"] = SSL configuration fields .SSL configuration @@ -6,7 +7,7 @@ |=== | Field | Type | Description | **PREFERRED_URL_SCHEME** | String | One of `http` or `https`. Note that users only set their `PREFERRED_URL_SCHEME` to `http` when there is no TLS encryption in the communication path from the client to Quay. 
-+ + + Users must set their `PREFERRED_URL_SCHEME`to `https` when using a TLS-terminating load balancer, a reverse proxy (for example, Nginx), or when using Quay with custom SSL certificates directly. In most cases, the `PREFERRED_URL_SCHEME` should be `https`. + **Default:** `http` @@ -19,7 +20,7 @@ Users must set their `PREFERRED_URL_SCHEME`to `https` when using a TLS-terminati | **SSL_CIPHERS** | Array of String | If specified, the nginx-defined list of SSL ciphers to enabled and disabled + + **Example:** + -[`CAMELLIA`, `!3DES`] +[`ECDHE-RSA-AES128-GCM-SHA256`, `ECDHE-ECDSA-AES128-GCM-SHA256`, `ECDHE-RSA-AES256-GCM-SHA384`, `ECDHE-ECDSA-AES256-GCM-SHA384`, `DHE-RSA-AES128-GCM-SHA256`, `DHE-DSS-AES128-GCM-SHA256`, `kEDH+AESGCM`, `ECDHE-RSA-AES128-SHA256`, `ECDHE-ECDSA-AES128-SHA256`, `ECDHE-RSA-AES128-SHA`, `ECDHE-ECDSA-AES128-SHA`, `ECDHE-RSA-AES256-SHA384`, `ECDHE-ECDSA-AES256-SHA384`, `ECDHE-RSA-AES256-SHA`, `ECDHE-ECDSA-AES256-SHA`, `DHE-RSA-AES128-SHA256`, `DHE-RSA-AES128-SHA`, `DHE-DSS-AES128-SHA256`, `DHE-RSA-AES256-SHA256`, `DHE-DSS-AES256-SHA`, `DHE-DSS-AES256-SHA`, `AES128-GCM-SHA256`, `AES256-GCM-SHA384`, `AES128-SHA256`, `AES256-SHA256`, `AES128-SHA`, `AES256-SHA`, `AES`, `!3DES"`, `!aNULL`, `!eNULL`, `!EXPORT`, `DES`, `!RC4`, `MD5`, `!PSK`, `!aECDH`, `!EDH-DSS-DES-CBC3-SHA`, `!EDH-RSA-DES-CBC3-SHA`, `!KRB5-DES-CBC3-SHA`] | **SSL_PROTOCOLS** | Array of String | If specified, nginx is configured to enabled a list of SSL protocols defined in the list. Removing an SSL protocol from the list disables the protocol during {productname} startup. 
+ + **Example:** + diff --git a/modules/config-fields-storage-aws.adoc b/modules/config-fields-storage-aws.adoc index 370755c8a..1c0e57586 100644 --- a/modules/config-fields-storage-aws.adoc +++ b/modules/config-fields-storage-aws.adoc @@ -2,20 +2,114 @@ [id="config-fields-storage-aws"] = AWS S3 storage -The following YAML shows a sample configuration using AWS S3 storage: - +The following YAML shows a sample configuration using AWS S3 storage. [source,yaml] ---- +# ... DISTRIBUTED_STORAGE_CONFIG: - s3Storage: - - S3Storage + default: + - S3Storage <1> - host: s3.us-east-2.amazonaws.com s3_access_key: ABCDEFGHIJKLMN s3_secret_key: OL3ABCDEFGHIJKLMN s3_bucket: quay_bucket + s3_region: <2> storage_path: /datastorage/registry DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] DISTRIBUTED_STORAGE_PREFERENCE: - - s3Storage + - default +# ... +---- +<1> The `S3Storage` storage driver should only be used for AWS S3 buckets. Note that this differs from general S3 access, where the RadosGW driver or other storage services can be used. For an example, see "Example B: Using RadosGW with general S3 access". +<2> Optional. The Amazon Web Services region. Defaults to `us-east-1`. + +[id="config-fields-storage-aws-sts"] +== AWS STS S3 storage + +The following YAML shows an example configuration for using Amazon Web Services (AWS) Security Token Service (STS) with {productname-ocp} configurations. + +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_CONFIG: + default: + - STSS3Storage + - sts_role_arn: <1> + s3_bucket: + storage_path: + sts_user_access_key: <2> + sts_user_secret_key: <3> + s3_region: <4> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +# ... +---- +<1> The unique Amazon Resource Name (ARN). +<2> The generated AWS S3 user access key. +<3> The generated AWS S3 user secret key. +<4> Optional. The Amazon Web Services region. Defaults to `us-east-1`. 
+ +[id="aws-cloudfront-storage-example"] +== AWS Cloudfront storage + +Use the following example when configuring AWS Cloudfront for your {productname} deployment. + +[NOTE] +==== +* When configuring AWS Cloudfront storage, the following conditions must be met for proper use with {productname}: +** You must set an *Origin path* that is consistent with {productname}'s storage path as defined in your `config.yaml` file. Failure to meet this require results in a `403` error when pulling an image. For more information, see link:https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginPath[Origin path]. +** You must configure a link:https://docs.aws.amazon.com/whitepapers/latest/secure-content-delivery-amazon-cloudfront/s3-origin-with-cloudfront.html[*Bucket policy*] and a link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/cors.html[*Cross-origin resource sharing (CORS)*] policy. +==== + +.Cloudfront S3 example YAML +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - CloudFrontedS3Storage + - cloudfront_distribution_domain: + cloudfront_key_id: + cloudfront_privatekey_filename: + host: + s3_access_key: + s3_bucket: + s3_secret_key: + storage_path: + s3_region: +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - default +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- + +.Bucket policy example +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " <1> <2> + }, + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" <3> + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " <1> <2> + }, + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} + ---- +<1> The identifier, or account ID, of the AWS account that owns the CloudFront OAI and S3 bucket. 
+<2> The CloudFront Origin Access Identity (OAI) that accesses the S3 bucket. +<3> Specifies that CloudFront can access all objects (`/*`) inside of the S3 bucket. \ No newline at end of file diff --git a/modules/config-fields-storage-features.adoc b/modules/config-fields-storage-features.adoc index 720c146ef..eda237d7f 100644 --- a/modules/config-fields-storage-features.adoc +++ b/modules/config-fields-storage-features.adoc @@ -17,4 +17,5 @@ The following table describes the image storage features for {productname}: | **FEATURE_STORAGE_REPLICATION** | Boolean | Whether to automatically replicate between storage engines. + + **Default:** `false` + |=== diff --git a/modules/config-fields-storage-fields.adoc b/modules/config-fields-storage-fields.adoc index 63ce38b25..de700af8f 100644 --- a/modules/config-fields-storage-fields.adoc +++ b/modules/config-fields-storage-fields.adoc @@ -25,4 +25,5 @@ The following table describes the image storage configuration fields for {produc **Example**: `100G` + + **Default:** `20G` + |=== diff --git a/modules/config-fields-storage-gcp.adoc b/modules/config-fields-storage-gcp.adoc index ad630bd7e..67828c0e5 100644 --- a/modules/config-fields-storage-gcp.adoc +++ b/modules/config-fields-storage-gcp.adoc @@ -13,7 +13,9 @@ DISTRIBUTED_STORAGE_CONFIG: bucket_name: quay-bucket secret_key: FhDAYe2HeuAKfvZCAGyOioNaaRABCDEFGHIJKLMN storage_path: /datastorage/registry + boto_timeout: 120 <1> DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] DISTRIBUTED_STORAGE_PREFERENCE: - googleCloudStorage ----- \ No newline at end of file +---- +<1> Optional. The time, in seconds, until a timeout exception is thrown when attempting to read from a connection. The default is `60` seconds. Also encompasses the time, in seconds, until a timeout exception is thrown when attempting to make a connection. The default is `60` seconds. 
\ No newline at end of file diff --git a/modules/config-fields-storage-noobaa.adoc b/modules/config-fields-storage-noobaa.adoc index 0cde8b339..26aadd1ba 100644 --- a/modules/config-fields-storage-noobaa.adoc +++ b/modules/config-fields-storage-noobaa.adoc @@ -1,8 +1,8 @@ :_content-type: CONCEPT [id="config-fields-storage-noobaa"] -= OCS/NooBaa += OpenShift Container Storage/NooBaa -The following YAML shows a sample configuration using an Open Container Storage/NooBaa instance: +The following YAML shows a sample configuration using an OpenShift Container Storage/NooBaa instance: [source,yaml] ---- @@ -16,4 +16,8 @@ DISTRIBUTED_STORAGE_CONFIG: is_secure: 'true' port: '443' storage_path: /datastorage/registry ----- \ No newline at end of file + maximum_chunk_size_mb: 100 <1> + server_side_assembly: true <2> +---- +<1> Defines the maximum chunk size, in MB, for the final copy. Has no effect if `server_side_assembly` is set to `false`. +<2> Optional. Whether {productname} should try and use server side assembly and the final chunked copy instead of client assembly. Defaults to `true`. \ No newline at end of file diff --git a/modules/config-fields-storage-rados.adoc b/modules/config-fields-storage-rados.adoc index f677666c0..b7c9c2e45 100644 --- a/modules/config-fields-storage-rados.adoc +++ b/modules/config-fields-storage-rados.adoc @@ -1,22 +1,30 @@ :_content-type: CONCEPT [id="config-fields-storage-rados"] -= Ceph / RadosGW Storage / Hitachi HCP += Ceph Object Gateway/RadosGW storage -The following YAML shows a sample configuration using Ceph/RadosGW and Hitachi HCP storage: +The following YAML shows a sample configuration using Ceph/RadosGW. +[NOTE] +==== +RadosGW is an on-premises S3-compatible storage solution. Note that this differs from general *AWS S3Storage*, which is specifically designed for use with Amazon Web Services S3. This means that RadosGW implements the S3 API and requires credentials like `access_key`, `secret_key`, and `bucket_name`. 
For more information about Ceph Object Gateway and the S3 API, see link:https://docs.redhat.com/en/documentation/red_hat_ceph_storage/4/html/developer_guide/ceph-object-gateway-and-the-s3-api#ceph-object-gateway-and-the-s3-api[Ceph Object Gateway and the S3 API]. +==== + +.RadosGW with general s3 access [source,yaml] ---- DISTRIBUTED_STORAGE_CONFIG: - radosGWStorage: + radosGWStorage: <1> - RadosGWStorage - - access_key: access_key_here - secret_key: secret_key_here - bucket_name: bucket_name_here - hostname: hostname_here - is_secure: 'true' + - access_key: + bucket_name: + hostname: + is_secure: true port: '443' + secret_key: storage_path: /datastorage/registry -DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] -DISTRIBUTED_STORAGE_PREFERENCE: - - default + maximum_chunk_size_mb: 100 <2> + server_side_assembly: true <3> ---- +<1> Used for general s3 access. Note that general s3 access is not strictly limited to Amazon Web Services (AWS) s3, and can be used with RadosGW or other storage services. For an example of general s3 access using the AWS S3 driver, see "AWS S3 storage". +<2> Optional. Defines the maximum chunk size in MB for the final copy. Has no effect if `server_side_assembly` is set to `false`. +<3> Optional. Whether {productname} should try and use server side assembly and the final chunked copy instead of client assembly. Defaults to `true`. 
diff --git a/modules/config-fields-storage-swift.adoc b/modules/config-fields-storage-swift.adoc index 3edfa9b5e..63f6e7750 100644 --- a/modules/config-fields-storage-swift.adoc +++ b/modules/config-fields-storage-swift.adoc @@ -1,4 +1,4 @@ -:_content-type: CONCEPT +:_content-type: REFERENCE [id="config-fields-storage-swift"] = Swift storage @@ -13,7 +13,10 @@ DISTRIBUTED_STORAGE_CONFIG: swift_password: swift_password_here swift_container: swift_container_here auth_url: https://example.org/swift/v1/quay - auth_version: 1 + auth_version: 3 + os_options: + tenant_id: + user_domain_name: ca_cert_path: /conf/stack/swift.cert" storage_path: /datastorage/registry DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] diff --git a/modules/config-fields-tag-expiration.adoc b/modules/config-fields-tag-expiration.adoc index df5adb9e5..872e3f6ee 100644 --- a/modules/config-fields-tag-expiration.adoc +++ b/modules/config-fields-tag-expiration.adoc @@ -15,25 +15,50 @@ The following tag expiration configuration fields are available with {productnam (Required) | Array of string | If enabled, the options that users can select for expiration of tags in their namespace. + + **Pattern:** + -`^[0-9]+(w\|m\|d\|h\|s)$` +`^[0-9]+(y\|w\|m\|d\|h\|s)$` | **DEFAULT_TAG_EXPIRATION** + (Required) | String | The default, configurable tag expiration time for time machine. + + **Pattern:** + -`^[0-9]+(w\|m\|d\|h\|s)$` + +`^[0-9]+(y\w\|m\|d\|h\|s)$` + **Default:** `2w` | **FEATURE_CHANGE_TAG_EXPIRATION** | Boolean | Whether users and organizations are allowed to change the tag expiration for tags in their namespace. + + **Default:** True + +| **FEATURE_AUTO_PRUNE** | Boolean | When set to `True`, enables functionality related to the auto-pruning of tags. + + +*Default:* `False` + +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* |Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. 
+ + + +**Default:** `300` + +|*DEFAULT_NAMESPACE_AUTOPRUNE_POLICY* | Object | The default organization-wide auto-prune policy. + +|{nbsp}{nbsp}{nbsp} *.method: number_of_tags* | Object | The option specifying the number of tags to keep. + +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *method: number_of_tags*, denotes the number of tags to keep. + + +For example, to keep two tags, specify `2`. + +|{nbsp}{nbsp}{nbsp} *.creation_date* | Object | The option specifying the duration of which to keep tags. +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *creation_date*, denotes how long to keep tags. + + +Can be set to seconds (`s`), days (`d`), months (`m`), weeks (`w`), or years (`y`). Must include a valid integer. For example, to keep tags for one year, specify `1y`. + +|*AUTO_PRUNING_DEFAULT_POLICY_POLL_PERIOD* |Integer | The period in which the auto-pruner worker runs at the registry level. By default, it is set to run one time per day (one time per 24 hours). Value must be in seconds. + |=== [id="example-config-fields-tag-expiration"] == Example tag expiration configuration -The following YAML shows a sample tag expiration configuration: +The following YAML example shows you a sample tag expiration configuration. -[source,terminal] +[source,yaml] ---- +# ... DEFAULT_TAG_EXPIRATION: 2w TAG_EXPIRATION_OPTIONS: - 0s @@ -41,4 +66,32 @@ TAG_EXPIRATION_OPTIONS: - 1w - 2w - 4w + - 3y +# ... +---- + +[id="example-auto-prune-policy-registry"] +== Registry-wide auto-prune policies examples + +The following YAML examples show you registry-wide auto-pruning examples by both number of tags and creation date. + +.Example registry auto-prune policy by number of tags +[source,yaml] +---- +# ... +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: number_of_tags + value: 10 <1> +# ... +---- +<1> In this scenario, ten tags remain. + +.Example registry auto-prune policy by creation date +[source,yaml] +---- +# ... 
+DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: creation_date + value: 1y +# ... ---- \ No newline at end of file diff --git a/modules/config-fields-user.adoc b/modules/config-fields-user.adoc index 10872393c..3ea1f8f94 100644 --- a/modules/config-fields-user.adoc +++ b/modules/config-fields-user.adoc @@ -49,13 +49,21 @@ *Default:* `False` -| **FEATURE_RESTRICTED_USERS** | Boolean | When set with `RESTRICTED_USERS_WHITELIST`, restricted users cannot create organizations or content in their own namespace. Normal permissions apply for an organization's membership, for example, a restricted user will still have normal permissions in organizations based on the teams that they are members of. +|**FEATURE_SUPERUSERS_ORG_CREATION_ONLY** |Boolean | Whether to only allow superusers to create organizations. + +*Default:* `False` + +| **FEATURE_RESTRICTED_USERS** | Boolean | When set to `True` with `RESTRICTED_USERS_WHITELIST`: + +* All normal users and superusers are restricted from creating organizations or content in their own namespace unless they are allowlisted via `RESTRICTED_USERS_WHITELIST`. + +* Restricted users retain their normal permissions within organizations based on team memberships. *Default:* `False` | **RESTRICTED_USERS_WHITELIST** | String | When set with `FEATURE_RESTRICTED_USERS: true`, specific users are excluded from the `FEATURE_RESTRICTED_USERS` setting. -| **GLOBAL_READONLY_SUPER_USERS** | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. +| **GLOBAL_READONLY_SUPER_USERS** | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. Only works for those superusers defined with the `SUPER_USERS` configuration field. 
|=== diff --git a/modules/config-fields-v2-ui.adoc b/modules/config-fields-v2-ui.adoc index 30d65e8f6..7ec14a576 100644 --- a/modules/config-fields-v2-ui.adoc +++ b/modules/config-fields-v2-ui.adoc @@ -1,12 +1,48 @@ :_content-type: CONCEPT [id="config-fields-v2-ui"] -= User interface v2 configuration field += User interface v2 configuration fields -.User interface v2 configuration field +.User interface v2 configuration fields [cols="3a,1a,2a",options="header"] |=== |Field | Type |Description | **FEATURE_UI_V2** | Boolean | When set, allows users to try the beta UI environment. - ++ *Default:* `False` -|=== \ No newline at end of file + +|**FEATURE_UI_V2_REPO_SETTINGS** |Boolean | When set to `True`, enables repository settings in the {productname} v2 UI. ++ +*Default:* `False` +|=== + + +[id="reference-miscellaneous-v2-ui"] +== v2 user interface configuration + +With `FEATURE_UI_V2` enabled, you can toggle between the current version of the user interface and the new version of the user interface. + +[IMPORTANT] +==== +* This UI is currently in beta and subject to change. In its current state, users can only create, view, and delete organizations, repositories, and image tags. +* When running {productname} in the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. +* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. In the new UI, {productname} uses the standard definition of megabyte (MB) to report image manifest sizes. +==== + +.Procedure + +. 
In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +FEATURE_TEAM_SYNCING: false +FEATURE_UI_V2: true +FEATURE_USER_CREATION: true +--- +---- + +. Log in to your {productname} deployment. + +. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to new UI, and then click *Use Beta Environment*, for example: ++ +image:38-ui-toggle.png[{productname} v2 UI toggle] \ No newline at end of file diff --git a/modules/config-file-intro.adoc b/modules/config-file-intro.adoc index b8f1c4041..6cd0ea5d5 100644 --- a/modules/config-file-intro.adoc +++ b/modules/config-file-intro.adoc @@ -6,9 +6,13 @@ To deploy a standalone instance of {productname}, you must provide the minimal c After supplying the required fields, you can validate your configuration. If there are any issues, they will be highlighted. +//// [NOTE] ==== -It is possible to use the configuration API to validate the configuration, but this requires starting the Quay container in configuration mode. For more information, see "Using the configuration tool." +It is possible to use the configuration API to validate the configuration, but this requires starting the `Quay` container in configuration mode. + +To deploy the configuration tool locally, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.10/html-single/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/index#poc-getting-started[Getting started with {productname}] and follow the instructions up to "Configuring {productname}". ==== +//// For changes to take effect, the registry must be restarted. 
\ No newline at end of file diff --git a/modules/config-file-minimal.adoc b/modules/config-file-minimal.adoc index b00653e4b..ba7119d8e 100644 --- a/modules/config-file-minimal.adoc +++ b/modules/config-file-minimal.adoc @@ -46,23 +46,21 @@ TAG_EXPIRATION_OPTIONS: - 1w - 2w - 4w + - 3y USER_EVENTS_REDIS: host: quay-server.example.com port: 6379 ssl: false ---- -[NOTE] -==== -The `SETUP_COMPLETE` field indicates that the configuration has been validated. You should use the configuration editor tool to validate your configuration before starting the registry. -==== - [id="config-local-storage"] == Local storage -Using local storage for images is only recommended when deploying a registry for proof of concept purposes. +Using local storage for images is only recommended when deploying a registry for _proof of concept_ purposes. + +When configuring local storage, storage is specified on the command line when starting the registry. -When configuring local storage, storage is specified on the command line when starting the registry. The following command maps a local directory, `$QUAY/storage` to the `datastorage` path in the container: +The following command maps a local directory, `$QUAY/storage` to the `datastorage` path in the container: [subs="verbatim,attributes"] ---- @@ -88,10 +86,12 @@ DISTRIBUTED_STORAGE_CONFIG: bucket_name: quay_bucket secret_key: FhDAYe2HeuAKfvZCAGyOioNaaRABCDEFGHIJKLMN storage_path: /datastorage/registry + boto_timeout: 120 <1> DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] DISTRIBUTED_STORAGE_PREFERENCE: - default ---- +<1> Optional. The time, in seconds, until a timeout exception is thrown when attempting to read from a connection. The default is `60` seconds. Also encompasses the time, in seconds, until a timeout exception is thrown when attempting to make a connection. The default is `60` seconds. When starting the registry using cloud storage, no configuration is required on the command line. 
For example: diff --git a/modules/config-intro.adoc b/modules/config-intro.adoc index ebeba98b3..aab8ea150 100644 --- a/modules/config-intro.adoc +++ b/modules/config-intro.adoc @@ -2,13 +2,9 @@ [id="config-intro"] = Getting started with {productname} configuration -{productname} can be deployed by an independent, standalone configuration, or by using the {ocp} {productname} Operator. +{productname} can be deployed by an independent, standalone configuration, or by using the {productname} Operator on {ocp}. -How you create, retrieve, update, and validate the {productname} configuration varies depending on the type of deployment you are using. However, the core configuration options are the same for either deployment type. Core configuration can be set by one of the following options: - -* Directly, by editing the `config.yaml` file. See "Editing the configuration file" for more information. -* Programmatically, by using the configuration API. See "Using the configuration API" for more information. -* Visually, by using the configuration tool UI. See "Using the configuration tool" for more information. +How you create, retrieve, update, and validate the {productname} configuration varies depending on the type of deployment you are using. However, the core configuration options are the same for either deployment type. Core configuration is primarily set through a `config.yaml` file, but can also be set by using the configuration API. For standalone deployments of {productname}, you must supply the minimum required configuration parameters before the registry can be started. The minimum requirements to start a {productname} registry can be found in the "Retrieving the current configuration" section. 
diff --git a/modules/config-preconfigure-automation.adoc b/modules/config-preconfigure-automation.adoc index 5546fb32b..02972be77 100644 --- a/modules/config-preconfigure-automation.adoc +++ b/modules/config-preconfigure-automation.adoc @@ -2,98 +2,78 @@ [id="config-preconfigure-automation"] = Pre-configuring {productname} for automation -{productname} has several configuration options that support automation. These options can be set before deployment to minimize the need to interact with the user interface. +{productname} supports several configuration options that enable automation. Users can configure these options before deployment to reduce the need for interaction with the user interface. [id="allowing-the-api-to-create-first-user"] == Allowing the API to create the first user -To create the first user using the `/api/v1/user/initialize` API, set the `FEATURE_USER_INITIALIZE` parameter to `true`. Unlike all other registry API calls which require an OAuth token that is generated by an OAuth application in an existing organization, the API endpoint does not require authentication. +To create the first user, users need to set the `FEATURE_USER_INITIALIZE` parameter to `true` and call the `/api/v1/user/initialize` API. Unlike all other registry API calls that require an OAuth token generated by an OAuth application in an existing organization, the API endpoint does not require authentication. -After you have deployed {productname}, you can use the API to create a user, for example, `quayadmin`, assuming that no other users have already been created. For more information see xref:using-the-api-to-create-first-user[Using the API to create the first user]. +Users can use the API to create a user such as `quayadmin` after deploying {productname}, provided no other users have been created. For more information, see xref:using-the-api-to-create-first-user[Using the API to create the first user]. 
[id="enabling-general-api-access"] == Enabling general API access -Set the config option `BROWSER_API_CALLS_XHR_ONLY` to `false` to allow general access to the {productname} registry API. +Users should set the `BROWSER_API_CALLS_XHR_ONLY` configuration option to `false` to allow general access to the {productname} registry API. [id="adding-super-user"] -== Adding a super user +== Adding a superuser -After deploying {productname}, you can create a user. We advise that the first user be given administrator privileges with full permissions. Full permissions can be configured in advance by using the `SUPER_USER` configuration object. For example: +After deploying {productname}, users can create a user and give the first user administrator privileges with full permissions. Users can configure full permissions in advance by using the `SUPER_USER` configuration object. For example: [source,yaml] ---- -... +# ... SERVER_HOSTNAME: quay-server.example.com SETUP_COMPLETE: true SUPER_USERS: - quayadmin -... +# ... ---- [id="restricting-user-creation"] == Restricting user creation -After you have configured a super user, you can restrict the ability to create new users to the super user group. Set the `FEATURE_USER_CREATION` to `false` to restrict user creation. For example: +After you have configured a superuser, you can restrict the ability to create new users to the superuser group by setting the `FEATURE_USER_CREATION` to `false`. For example: [source,yaml] ---- -... +# ... FEATURE_USER_INITIALIZE: true BROWSER_API_CALLS_XHR_ONLY: false SUPER_USERS: - quayadmin FEATURE_USER_CREATION: false -... +# ... 
---- -[id="enabling-new-functionality-38"] -== Enabling new functionality +[id="enabling-new-functionality-310"] +== Enabling new functionality in {productname} {producty} -To use new {productname} 3.8 functionality, enable some or all of the following features: +To use new {productname} {producty} functions, enable some or all of the following features: [source,yaml] ---- -... +# ... FEATURE_UI_V2: true -FEATURE_LISTEN_IP_VERSION: -FEATURE_SUPERUSERS_FULL_ACCESS: true -GLOBAL_READONLY_SUPER_USERS: - - -FEATURE_RESTRICTED_USERS: true -RESTRICTED_USERS_WHITELIST: - - -... ----- - -[id="enabling-new-functionality-37"] -== Enabling new functionality - -To use new {productname} 3.7 functionality, enable some or all of the following features: - -[source,yaml] ----- -... -FEATURE_QUOTA_MANAGEMENT: true -FEATURE_BUILD_SUPPORT: true -FEATURE_PROXY_CACHE: true -FEATURE_STORAGE_REPLICATION: true -DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 102400000 -... +FEATURE_UI_V2_REPO_SETTINGS: true +FEATURE_AUTO_PRUNE: true +ROBOTS_DISALLOW: false +# ... ---- [id="suggested-configuration-for-automation"] == Suggested configuration for automation -The following `config.yaml` parameters are suggested for automation: +The following `config.yaml` parameters are suggested for automation: [source,yaml] ---- -... +# ... FEATURE_USER_INITIALIZE: true BROWSER_API_CALLS_XHR_ONLY: false SUPER_USERS: - quayadmin FEATURE_USER_CREATION: false -... +# ... ---- \ No newline at end of file diff --git a/modules/config-ui-custom-ssl-certs.adoc b/modules/config-ui-custom-ssl-certs.adoc index 686f40d2c..61b386bd5 100644 --- a/modules/config-ui-custom-ssl-certs.adoc +++ b/modules/config-ui-custom-ssl-certs.adoc @@ -1,14 +1,13 @@ -[[config-ui-custom-ssl-certs]] -= Custom SSL certificates UI +:_content-type: PROCEDURE +[id="config-ui-custom-ssl-certs"] += Custom SSL/TLS certificates UI -The config tool can be used to load custom certificates to facilitate access to resources such as external databases. 
Select the custom certs to be uploaded, ensuring that they are in PEM format, with an extension `.crt`. +The config tool can be used to load custom certificates to facilitate access to resources like external databases. Select the custom certs to be uploaded, ensuring that they are in PEM format, with an extension `.crt`. +image:ui-custom-ssl-certs.png[Custom SSL/TLS certificates] +The config tool also displays a list of any uploaded certificates. After you upload your custom SSL/TLS cert, it will appear in the list. For example: -image:ui-custom-ssl-certs.png[Custom SSL certificates] - -The config tool also displays a list of any uploaded certificates. Once you upload your custom SSL cert, it will appear in the list: - -image:ui-custom-ssl-certs-uploaded.png[Custom SSL certificates] +image:ui-custom-ssl-certs-uploaded.png[Custom SSL/TLS certificates] //As an alternative to using the config tool, you can place the custom certs in a folder named `extra_ca_certs` under the {productname} configdirectory where the `config.yaml` is located. \ No newline at end of file diff --git a/modules/config-ui-storage-georepl.adoc b/modules/config-ui-storage-georepl.adoc index 407fa72da..12ef8044d 100644 --- a/modules/config-ui-storage-georepl.adoc +++ b/modules/config-ui-storage-georepl.adoc @@ -1,31 +1,75 @@ +:_content-type: PROCEDURE [id="enable-storage-replication-standalone"] - -= Enable storage replication - standalone Quay += Enabling storage replication for standalone {productname} Use the following procedure to enable storage replication on {productname}. .Procedure -. In your {productname} config editor, locate the *Registry Storage* section. - -. Click *Enable Storage Replication*. - -. Add each of the storage engines to which data will be replicated. All storage engines to be used must be listed. +. Update your `config.yaml` file to include the storage engines to which data will be replicated. You must list all storage engines to be used: ++ +[source,yaml] +---- +# ... 
+FEATURE_STORAGE_REPLICATION: true +# ... +DISTRIBUTED_STORAGE_CONFIG: + usstorage: + - RHOCSStorage + - access_key: + bucket_name: + hostname: my.noobaa.hostname + is_secure: false + port: "443" + secret_key: + storage_path: /datastorage/registry + eustorage: + - S3Storage + - host: s3.amazon.com + port: "443" + s3_access_key: + s3_bucket: + s3_secret_key: + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - usstorage + - eustorage +# ... +---- -. If complete replication of all images to all storage engines is required, click *Replicate to storage engine by default* under each storage engine configuration. This ensures that all images are replicated to that storage engine. +. Optional. If complete replication of all images to all storage engines is required, you can replicate images to the storage engine by manually setting the `DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS` field. This ensures that all images are replicated to that storage engine. For example: ++ +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - usstorage + - eustorage +# ... +---- + [NOTE] ==== To enable per-namespace replication, contact {productname} support. ==== -. When finished, click *Save Configuration Changes*. The configuration changes will take effect after {productname} restarts. +. After adding storage and enabling *Replicate to storage engine by default* for geo-replication, you must sync existing image data across all storage. To do this, you must execute into the container by running the following command: ++ +[source,terminal] +---- +$ podman exec -it +---- -. After adding storage and enabling *Replicate to storage engine by default* for geo-replication, you must sync existing image data across all storage. To do this, you must `oc exec` (alternatively, `docker exec` or `kubectl exec`) into the container and enter the following commands: +. 
To sync the content after adding new storage, enter the following commands: + [source,terminal] ---- # scl enable python27 bash +---- ++ +[source,terminal] +---- # python -m util.backfillreplication ---- + diff --git a/modules/config-updates-310.adoc b/modules/config-updates-310.adoc new file mode 100644 index 000000000..e770305fe --- /dev/null +++ b/modules/config-updates-310.adoc @@ -0,0 +1,30 @@ +:_content-type: REFERENCE +[id="config-updates-310"] += Configuration updates for {productname} 3.10 + +The following sections detail new configuration fields added in {productname} 3.10. + +[id="auto-pruner-namespace"] +== Namespace auto-pruning configuration fields + +With {productname} 3.10, deployments can be configured to automatically prune old image tags by a specified, allotted amount, or by the time in which they were created. + +.Namespace auto-pruning configuration field +|=== +|Field | Type |Description +| **FEATURE_AUTO_PRUNE** | Boolean | When set to `True`, enables functionality related to the auto-pruning of tags. + + +*Default:* `False` + +|**SECURITY_SCANNER_V4_MANIFEST_CLEANUP** |Boolean | When set to `true` the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. + + +*Default*: `True` + +|**ROBOTS_DISALLOW** |Boolean |When set to `true`, robot accounts are prevented from all interactions, as well as from being created + + +*Default*: `False` + +|**FEATURE_UI_V2_REPO_SETTINGS** |Boolean | When set to `True`, enables repository settings in the {productname} v2 UI. + + +*Default:* `False` +|=== \ No newline at end of file diff --git a/modules/config-updates-311.adoc b/modules/config-updates-311.adoc new file mode 100644 index 000000000..550ff4d11 --- /dev/null +++ b/modules/config-updates-311.adoc @@ -0,0 +1,59 @@ +:_content-type: REFERENCE +[id="config-updates-311"] += Configuration updates for {productname} 3.11 + +The following sections detail new configuration fields added in {productname} 3.11. 
+ +[id="team-synchronization-configuration-field"] +== Team synchronization configuration field + +The following configuration field has been added for the team synchronization via OIDC feature: + +.Team synchronization configuration field +|=== + +|Field | Type |Description +|*PREFERRED_GROUP_CLAIM_NAME* | String | The key name within the OIDC token payload that holds information about the user's group memberships. + +|=== + +.Team synchronization example YAML configuration + +[source,yaml] +---- +# ... +PREFERRED_GROUP_CLAIM_NAME: +# ... +---- + +[id="aws-s3-sts-configuration-fields"] +== Configuration fields for AWS S3 STS deployments + +The following configuration fields have been added when configuring AWS STS for {productname}. These fields are used when configuring AWS S3 storage for your deployment. + +.AWS S3 STS configuration fields +|=== +|Field | Type |Description +| *.sts_role_arn* | String | The unique Amazon Resource Name (ARN) required when configuring AWS STS for {productname}. + +|*.sts_user_access_key* |String | The generated AWS S3 user access key required when configuring AWS STS for {productname}. + +|*.sts_user_secret_key* |String |The generated AWS S3 user secret key required when configuring AWS STS for {productname}. +|=== + +.AWS S3 STS example YAML configuration + +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_CONFIG: + default: + - STSS3Storage + - sts_role_arn: + s3_bucket: + s3_region: + storage_path: + sts_user_access_key: + sts_user_secret_key: +# ... +---- \ No newline at end of file diff --git a/modules/config-updates-312.adoc b/modules/config-updates-312.adoc new file mode 100644 index 000000000..9e7031221 --- /dev/null +++ b/modules/config-updates-312.adoc @@ -0,0 +1,132 @@ +:_content-type: REFERENCE +[id="config-updates-312"] += Configuration updates for {productname} 3.12 + +The following sections detail new configuration fields added in {productname} 3.12. 
+ +[id="registry-auto-prune-configuration-fields"] +== Registry auto-pruning configuration fields + +The following configuration fields have been added to {productname} auto-pruning feature: +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* |Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. + + + +**Default:** `300` + +|*DEFAULT_NAMESPACE_AUTOPRUNE_POLICY* | Object | The default organization-wide auto-prune policy. + +|{nbsp}{nbsp}{nbsp} *.method: number_of_tags* | Object | The option specifying the number of tags to keep. + +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *method: number_of_tags*, denotes the number of tags to keep. + + +For example, to keep two tags, specify `2`. + +|{nbsp}{nbsp}{nbsp} *.method: creation_date* | Object | The option specifying the duration of which to keep tags. +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *creation_date*, denotes how long to keep tags. + + +Can be set to seconds (`s`), days (`d`), months (`m`), weeks (`w`), or years (`y`). Must include a valid integer. For example, to keep tags for one year, specify `1y`. + +|*AUTO_PRUNING_DEFAULT_POLICY_POLL_PERIOD* |Integer | The period in which the auto-pruner worker runs at the registry level. By default, it is set to run one time per day (one time per 24 hours). Value must be in seconds. + +|=== + +[id="oauth-reassign-configuration-field"] +== OAuth access token reassignment configuration field + +The following configuration field has been added for reassigning OAuth access tokens: + +|=== +| Field | Type | Description +| *FEATURE_ASSIGN_OAUTH_TOKEN* | Boolean | Allows organization administrators to assign OAuth tokens to other users. +|=== + +.Example OAuth access token reassignment YAML +[source,yaml] +---- +# ... +FEATURE_ASSIGN_OAUTH_TOKEN: true +# ... 
+---- + +[id="image-vulnerability-notification-field"] +== Vulnerability detection notification configuration field + +The following configuration field has been added to notify users on detected vulnerabilities based on security level: + +|=== +| Field | Type | Description +| *NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX* | String | Set minimal security level for new notifications on detected vulnerabilities. Avoids creation of large number of notifications after first index. If not defined, defaults to `High`. Available options include `Critical`, `High`, `Medium`, `Low`, `Negligible`, and `Unknown`. +|=== + +.Example image vulnerability notification YAML +[source,yaml] +---- +NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX: High +---- + +[id="oci-referrers-api-configuration-field"] +== OCI referrers API configuration field + +The following configuration field allows users to list OCI referrers of a manifest under a repository by using the v2 API: + +|=== +| Field | Type | Description +| *FEATURE_REFERRERS_API* | Boolean | Enables OCI 1.1's referrers API. +|=== + +.Example OCI referrers enablement YAML +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: true +# ... +---- + +[id="disable-strict-logging-configuration-field"] +== Disable strict logging configuration field + +The following configuration field has been added to address when external systems like Splunk or ElasticSearch are configured as audit log destinations but are intermittently unavailable. When set to `True`, the logging event is logged to the stdout instead. + +|=== +| Field | Type | Description +| *ALLOW_WITHOUT_STRICT_LOGGING* | Boolean | When set to `True`, if the external log system like Splunk or ElasticSearch is intermittently unavailable, allows users to push images normally. Events are logged to the stdout instead. Overrides `ALLOW_PULLS_WITHOUT_STRICT_LOGGING` if set. +|=== + +.Example strict logging YAML +[source,yaml] +---- +# ... +ALLOW_WITHOUT_STRICT_LOGGING: True +# ... 
+---- + +[id="notification-configuration-field"] +== Notification interval configuration field + +The following configuration field has been added to enhance {productname} notifications: + +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* | Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. By default, this field is set to notify {productname} users of events happening every 5 hours. +|=== + +.Example notification re-run YAML +[source,yaml] +---- +# ... +NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES: 10 +# ... +---- + +[id="clair-index-layer-size-configuration-field"] +== Clair indexing layer size configuration field + +The following configuration field has been added for the Clair security scanner, which allows {productname} administrators to set a maximum layer size allowed for indexing. + +|=== +| Field | Type | Description +| *SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE* | String | The maximum layer size allowed for indexing. If the layer size exceeds the configured size, the {productname} UI returns the following message: `The manifest for this tag has layer(s) that are too large to index by the Quay Security Scanner`. The default is `8G`, and the maximum recommended is `10G`. + + + *Example*: `8G` +|=== \ No newline at end of file diff --git a/modules/config-updates-313.adoc b/modules/config-updates-313.adoc new file mode 100644 index 000000000..e6eafd5e3 --- /dev/null +++ b/modules/config-updates-313.adoc @@ -0,0 +1,26 @@ +:_content-type: REFERENCE +[id="config-updates-313"] += Configuration updates for {productname} 3.13 + +The following sections detail new configuration fields added in {productname} 3.13. + +[id="disabling-pushes-configuration-field"] +== Disabling pushes to the {productname} registry + +The following configuration field has been added to disable the push of new content to the registry. 
+ +|=== +| Field | Type | Description + +|*DISABLE_PUSHES* |Boolean | Disables pushes of new content to the registry while retaining all other functionality. Differs from `read-only` mode because the database is not set as `read-only`. When `DISABLE_PUSHES` is set to `true`, the {productname} garbage collector is disabled. As a result, when `PERMANENTLY_DELETE_TAGS` is enabled, using the {productname} UI to permanently delete a tag does not result in the immediate deletion of a tag. Instead, the image stays in the backend storage until `DISABLE_PUSHES` is set to `false`, which re-enables the garbage collector. {productname} administrators should be aware of this caveat when using `DISABLE_PUSHES` and `PERMANENTLY_DELETE_TAGS` together. + + + + **Default:** False +|=== + +.Example DISABLE_PUSHES configuration field +[source,yaml] +---- +# ... +DISABLE_PUSHES: true +# ... +---- diff --git a/modules/config-updates-314.adoc b/modules/config-updates-314.adoc new file mode 100644 index 000000000..b90a331ec --- /dev/null +++ b/modules/config-updates-314.adoc @@ -0,0 +1,79 @@ +:_content-type: REFERENCE +[id="config-updates-314"] += Configuration updates for {productname} 3.14 + +The following sections detail new configuration fields added in {productname} 3.14. + +[id="model-card-rendering"] +== Model card rendering configuration fields + +The following configuration fields have been added to support model card rendering on the v2 UI. + +|=== +| Field | Type | Description + +|*FEATURE_UI_MODELCARD* |Boolean | Enables *Model card* image tab in UI. Defaults to `true`. +|*UI_MODELCARD_ARTIFACT_TYPE* | String | Defines the model card artifact type. +|*UI_MODELCARD_ANNOTATION* |Object | This optional field defines the manifest-level annotation of the model card stored in an OCI image. +|*UI_MODELCARD_LAYER_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. 
+|=== + +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true <1> +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel <2> +UI_MODELCARD_ANNOTATION: <3> + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: <4> + org.opencontainers.image.title: README.md +---- +<1> Enables the *Model Card* image tab in the UI. +<2> Defines the model card artifact type. In this example, the artifact type is `application/x-mlmodel`. +<3> Optional. If an image does not have an `artifactType` defined, this field is checked at the manifest level. If a matching annotation is found, the system then searches for a layer with an annotation matching `UI_MODELCARD_LAYER_ANNOTATION`. +<4> Optional. If an image has an `artifactType` defined and multiple layers, this field is used to locate the specific layer containing the model card. + +[id="new-quay-footer-fields"] +== Footer configuration fields + +The following configuration fields have been added to the original (v1) UI. You can use these fields to customize the footer of your on-prem v1 UI. + +[NOTE] +==== +These fields are currently unavailable on the {productname} v2 UI. +==== + +|=== +| Field | Type | Description + +|*FOOTER_LINKS* |Object | Enable customization of footer links in {productname}'s UI for on-prem installations. + +|*.TERMS_OF_SERVICE_URL* | String | Custom terms of service for on-prem installations. + + + +**Example:** + +`https://index.hr` + +|*.PRIVACY_POLICY_URL* | String | Custom privacy policy for on-prem installations. + + + +**Example:** + +`https://index.hr` +|*.SECURITY_URL* | String | Custom security page for on-prem installations. + + + +**Example:** + +`https://index.hr` + +| **.ABOUT_URL** | String | Custom about page for on-prem installations. 
+ + + +**Example:** + +`https://index.hr` +|=== + +.Example footer links YAML +[source,yaml] +---- +FOOTER_LINKS: + "TERMS_OF_SERVICE_URL": "https://www.index.hr" + "PRIVACY_POLICY_URL": "https://www.example.hr" + "SECURITY_URL": "https://www.example.hr" + "ABOUT_URL": "https://www.example.hr" +---- \ No newline at end of file diff --git a/modules/config-updates-36.adoc b/modules/config-updates-36.adoc index adc473303..7f76adff0 100644 --- a/modules/config-updates-36.adoc +++ b/modules/config-updates-36.adoc @@ -15,7 +15,7 @@ The following configuration fields have been introduced with {productname} 3.6: |**FEATURE_USER_INITIALIZE** |If set to true, the first `User` account can be created by the API `/api/v1/user/initialize`. For more information, see xref:config-preconfigure-automation[Pre-configuring {productname} for automation]. -| **ALLOWED_OCI_ARTIFACT_TYPES** |Helm, cosign, and ztsd compression scheme artifacts are built into {productname} 3.6 by default. For any other Open Container Initiative (OCI) media types that are not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's `config.yaml` For more information, see xref:other-oci-artifacts-with-quay[Adding other OCI media types to Quay]. +| **ALLOWED_OCI_ARTIFACT_TYPES** |Helm, cosign, and ztsd compression scheme artifacts are built into {productname} 3.6 by default. For any other Open Container Initiative (OCI) artifact types that are not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's `config.yaml` For more information, see xref:other-oci-artifacts-with-quay[Adding other OCI media types to Quay]. | **CREATE_PRIVATE_REPO_ON_PUSH** |Registry users now have the option to set `CREATE_PRIVATE_REPO_ON_PUSH` in their `config.yaml` to `True` or `False` depending on their security needs. 
diff --git a/modules/config-updates-38.adoc b/modules/config-updates-38.adoc index 19c4af700..ff0198e00 100644 --- a/modules/config-updates-38.adoc +++ b/modules/config-updates-38.adoc @@ -1,4 +1,4 @@ -:_content-type: CONCEPT +:_content-type: REFERENCE [id="config-updates-38"] = Configuration updates for Quay 3.8 diff --git a/modules/config-updates-39.adoc b/modules/config-updates-39.adoc new file mode 100644 index 000000000..72acd5778 --- /dev/null +++ b/modules/config-updates-39.adoc @@ -0,0 +1,169 @@ +:_content-type: REFERENCE +[id="config-updates-39"] += Configuration updates for {productname} 3.9 + +The following sections detail new configuration fields added in {productname} 3.9. + +[id="tracking-audit-logins"] +== Action log audit configuration + +With {productname} 3.9, audit logins are tracked by default. + +.Audit logs configuration field +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **ACTION_LOG_AUDIT_LOGINS** | Boolean | When set to `True`, tracks advanced events such as logging into, and out of, the UI, and logging in using Docker for regular users, robot accounts, and for application-specific token accounts. + + + +**Default:** `True` +|=== + +[id="splunk-action-log-field"] +== Addition of Splunk action logs + +With {productname} 3.9, Splunk can be configured under the *LOGS_MODEL* parameter. + +.Splunk configuration fields +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **LOGS_MODEL** | String | Specifies the preferred method for handling log data. + + + +**Values:** One of `database`, `transition_reads_both_writes_es`, `elasticsearch`, `splunk` + +**Default:** `database` +|=== + +[id="new-model-config-options"] +=== LOGS_MODEL_CONFIG additions + +The following *LOGS_MODEL_CONFIG* options are available when configuring Splunk. 
+ +* **LOGS_MODEL_CONFIG** [object]: Logs model config for action logs +** **producer** [string]: `splunk` +** **splunk_config** [object]: Logs model configuration for Splunk action logs or the Splunk cluster configuration +*** **host** [string]: Splunk cluster endpoint. +*** **port** [integer]: Splunk management cluster endpoint port. +*** **bearer_token** [string]: The bearer token for Splunk. +*** **verify_ssl** [boolean]: Enable (`True`) or disable (`False`) TLS/SSL verification for HTTPS connections. +*** **index_prefix** [string]: Splunk's index prefix. +*** **ssl_ca_path** [string]: The relative container path to a single `.pem` file containing a certificate authority (CA) for SSL validation. + +[id="splunk-example-yaml"] +=== Example configuration for Splunk + +The following YAML entry provides an example configuration for Splunk. + +.Splunk config.yaml example +[source,yaml] +---- +--- +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: http://.remote.csb + port: 8089 + bearer_token: + url_scheme: + verify_ssl: False + index_prefix: + ssl_ca_path: +--- +---- + +[id="quota-management-config-fields"] +== Quota management configuration fields + +The following configuration fields have been added to enhance the {productname} quota management feature. + +.{productname} 3.9 quota management configuration fields +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description + +| **QUOTA_BACKFILL** | Boolean | Enables the quota backfill worker to calculate the size of pre-existing blobs. + + + +**Default**: `True` + +|**QUOTA_TOTAL_DELAY_SECONDS** |String | The time delay for starting the quota backfill. Rolling deployments can cause incorrect totals. This field *must* be set to a time longer than it takes for the rolling deployment to complete. + + + +**Default**: `1800` + +|**PERMANENTLY_DELETE_TAGS** |Boolean | Enables functionality related to the removal of tags from the time machine window. 
+ + + +**Default**: `False` + +|**RESET_CHILD_MANIFEST_EXPIRATION** |Boolean |Resets the expirations of temporary tags targeting the child manifests. With this feature set to `True`, child manifests are immediately garbage collected. + + + +**Default**: `False` + +|=== + +[id="quota-management-config-settings-39"] +=== Possible quota management configuration settings + +The following table explains possible quota management configuration settings in {productname} 3.9. + +.Quota management configuration options +[cols="2a,1a,2a",options="header"] +|=== +|*FEATURE_QUOTA_MANAGEMENT* |*QUOTA_BACKFILL* |*OUTCOME* +|`true` |`true` | With these features configured as `true`, quota management is enabled and working for {productname} 3.9. For more information about configuring quota management for {productname} 3.9, see "Quota management for {productname} 3.9". +|`true` |`false` |With `FEATURE_QUOTA_MANAGEMENT` set to `true`, and `QUOTA_BACKFILL` set to `false`, the quota management feature has been enabled. However, pre-existing images from a prior (N-1) y-stream version of {productname} (for example, 3.8), must be backfilled before quota calculation can continue. To backfill image sizes, set `QUOTA_BACKFILL` to `true`. +|`false` |`false` | With these features configured as `false`, the quota management feature is disabled. +|`false` |`true` | With `FEATURE_QUOTA_MANAGEMENT` set to `false`, and `QUOTA_BACKFILL` set to `true`, the quota management feature is disabled. +|=== + +[id="suggested-management-config-settings-39-quota"] +=== Suggested quota management configuration settings + +The following YAML is the suggested configuration when enabling quota management. 
+ +.Suggested quota management configuration +[source,yaml] +---- +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_GARBAGE_COLLECTION: true +PERMANENTLY_DELETE_TAGS: true +QUOTA_TOTAL_DELAY_SECONDS: 1800 +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +[id=postgresql-pvc-backup-config-fields] +== PostgreSQL PVC backup environment variable + +The following environment variable has been added to configure whether {productname} automatically removes old persistent volume claims (PVCs) when upgrading from version 3.8 -> 3.9: + +.{productname} 3.9 PostgreSQL backup environment variable +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| *POSTGRES_UPGRADE_DELETE_BACKUP* |Boolean | When set to `True`, removes old persistent volume claims (PVCs) after upgrading. ++ +**Default**: `False` + +|=== + +[id="pvc-backup-example-yaml"] +=== Example configuration for PostgreSQL PVC backup + +The following `Subscription` object provides an example configuration for backing up PostgreSQL 10 PVCs. + +.`Subscription` object for PostgreSQL 10 PVCs +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.8 + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_DELETE_BACKUP + value: "false" +---- diff --git a/modules/configuring-api-calls.adoc b/modules/configuring-api-calls.adoc new file mode 100644 index 000000000..922bfb314 --- /dev/null +++ b/modules/configuring-api-calls.adoc @@ -0,0 +1,17 @@ +[id="configuring-api-calls"] += Configuring {productname} to accept API calls + +Prior to using the {productname} API, you must disable `BROWSER_API_CALLS_XHR_ONLY` in your `config.yaml` file. This allows you to avoid such errors as `API calls must be invoked with an X-Requested-With header if called from a browser`. + +.Procedure + +. 
In your {productname} `config.yaml` file, set `BROWSER_API_CALLS_XHR_ONLY` to `false`. For example: ++ +[source,yaml] +---- +# ... +BROWSER_API_CALLS_XHR_ONLY: false +# ... +---- + +. Restart your {productname} deployment. \ No newline at end of file diff --git a/modules/configuring-aws-sts-quay.adoc b/modules/configuring-aws-sts-quay.adoc new file mode 100644 index 000000000..87d9c3c03 --- /dev/null +++ b/modules/configuring-aws-sts-quay.adoc @@ -0,0 +1,125 @@ +:_content-type: PROCEDURE +[id="configuring-aws-sts-quay"] += Configuring AWS STS for {productname} + +Support for Amazon Web Services (AWS) Security Token Service (STS) is available for standalone {productname} deployments and {productname-ocp}. AWS STS is a web service for requesting temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users and for users that you authenticate, or _federated users_. This feature is useful for clusters using Amazon S3 as an object storage, allowing {productname} to use STS protocols to authenticate with Amazon S3, which can enhance the overall security of the cluster and help to ensure that access to sensitive data is properly authenticated and authorized. + +Configuring AWS STS is a multi-step process that requires creating an AWS IAM user, creating an S3 role, and configuring your {productname} `config.yaml` file to include the proper resources. + +Use the following procedures to configure AWS STS for {productname}. + +[id="creating-am-user"] +== Creating an IAM user + +Use the following procedure to create an IAM user. + +.Procedure + +. Log in to the Amazon Web Services (AWS) console and navigate to the Identity and Access Management (IAM) console. + +. In the navigation pane, under *Access management* click *Users*. + +. Click *Create User* and enter the following information: + +.. Enter a valid username, for example, `quay-user`. + +.. For *Permissions options*, click *Add user to group*. + +. 
On the *review and create* page, click *Create user*. You are redirected to the *Users* page. + +. Click the username, for example, *quay-user*. + +. Copy the ARN of the user, for example, `arn:aws:iam::123492922789:user/quay-user`. + +. On the same page, click the *Security credentials* tab. + +. Navigate to *Access keys*. + +. Click *Create access key*. + +. On the *Access key best practices & alternatives* page, click *Command Line Interface (CLI)*, then, check the confirmation box. Then click *Next*. + +. Optional. On the *Set description tag - optional* page, enter a description. + +. Click *Create access key*. + +. Copy and store the access key and the secret access key. ++ +[IMPORTANT] +==== +This is the only time that the secret access key can be viewed or downloaded. You cannot recover it later. However, you can create a new access key any time. +==== + +. Click *Done*. + +[id="creating-s3-role"] +== Creating an S3 role + +Use the following procedure to create an S3 role for AWS STS. + +.Prerequisites + +* You have created an IAM user and stored the access key and the secret access key. + +.Procedure + +. If you are not already, navigate to the IAM dashboard by clicking *Dashboard*. + +. In the navigation pane, click *Roles* under *Access management*. + +. Click *Create role*. + +* Click *Custom Trust Policy*, which shows an editable JSON policy. By default, it shows the following information: ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Statement1", + "Effect": "Allow", + "Principal": {}, + "Action": "sts:AssumeRole" + } + ] +} +---- + +. Under the `Principal` configuration field, add your AWS ARN information. For example: ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Statement1", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::123492922789:user/quay-user" + }, + "Action": "sts:AssumeRole" + } + ] +} +---- + +. Click *Next*. + +. 
On the *Add permissions* page, type `AmazonS3FullAccess` in the search box. Check the box to add that policy to the S3 role, then click *Next*.
+
+. On the *Name, review, and create* page, enter the following information:
+
+.. Enter a role name, for example, `example-role`.
+
+.. Optional. Add a description.
+
+. Click the *Create role* button. You are navigated to the *Roles* page. Under *Role name*, the newly created S3 role should be available.
+
+////
+[id="configuring-quay-operator-use-aws-sts"]
+== Configuring the {productname} to use AWS STS
+
+Depending on your deployment type, whether standalone or on {ocp}, you can use one of the following procedures to edit your `config.yaml` file to use AWS STS.
+////
diff --git a/modules/configuring-cert-based-auth-quay-cloudsql.adoc b/modules/configuring-cert-based-auth-quay-cloudsql.adoc
new file mode 100644
index 000000000..e7bad5c12
--- /dev/null
+++ b/modules/configuring-cert-based-auth-quay-cloudsql.adoc
@@ -0,0 +1,116 @@
+:_content-type: PROCEDURE
+[id="configuring-cert-based-auth-quay-sql"]
+= Configuring certificate-based authentication with SQL
+
+The following procedure demonstrates how to connect {productname} with an SQL database using secure client-side certificates. This method ensures both connectivity and authentication through Certificate Trust Verification, as it verifies the SQL server's certificate against a trusted Certificate Authority (CA). This enhances the security of the connection between {productname} and your SQL server while simplifying automation for your deployment. Although the example uses Google Cloud Platform's CloudSQL, the procedure also applies to PostgreSQL and other supported databases.
+
+.Prerequisites
+
+* You have generated custom Certificate Authorities (CAs) and your SSL/TLS certificates and keys are available in `PEM` format that will be used to generate an SSL connection with your CloudSQL database. 
For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}].
+* You have `base64 decoded` the original config bundle into a `config.yaml` file. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-config-cli-download[Downloading the existing configuration].
+* You are using an externally managed PostgreSQL or CloudSQL database. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-unmanaged-postgres[Using an existing PostgreSQL database] with the `DB_URI` variable set.
+* Your externally managed PostgreSQL or CloudSQL database is configured for SSL/TLS.
+* The `postgres` component of your `QuayRegistry` CRD is set to `managed: false`, and your CloudSQL database is set with the `DB_URI` configuration variable. The following procedure uses `postgresql://:@:/`.
+
+.Procedure
+
+. After you have generated the CAs and SSL/TLS certificates and keys for your CloudSQL database and ensured that they are in `.pem` format, test the SSL connection to your CloudSQL server:
+
+.. Initiate a connection to your CloudSQL server by entering the following command:
++
+[source,terminal]
+----
+$ psql "sslmode=verify-ca sslrootcert=.pem sslcert=.pem sslkey=.pem hostaddr= port=<5432> user= dbname="
+----
+
+. In your {productname} directory, create a new YAML file, for example, `quay-config-bundle.yaml`, by running the following command:
++
+[source,terminal]
+----
+$ touch quay-config-bundle.yaml
+----
+
+. 
Create a `postgresql-client-certs` resource by entering the following command: ++ +[source,terminal] +---- +$ oc -n create secret generic postgresql-client-certs \ +--from-file config.yaml= <1> +--from-file=tls.crt= <2> +--from-file=tls.key= <3> +--from-file=ca.crt= <4> +---- +<1> Where` ` is your `base64 decoded` `config.yaml` file. +<2> Where `ssl_client_certificate.pem` is your SSL certificate in `.pem` format. +<3> Where `ssl_client_key.pem` is your SSL key in `.pem` format. +<4> Where `ssl_server_certificate.pem` is your SSL root CA in `.pem` format. + +. Edit your ``quay-config-bundle.yaml` file to include the following database connection settings: ++ +[IMPORTANT] +==== +* The information included in the `DB_CONNECTION_ARGS` variable, for example, `sslmode`, `sslrootcert`, `sslcert`, and `sslkey` *must* match the information appended to the `DB_URI` variable. Failure to match might result in a failed connection. +* You cannot specify custom filenames or paths. Certificate file paths for `sslrootcert`, `sslcert`, and `sslkey` are hardcoded defaults and mounted into the `Quay` pod from the Kubernetes secret. You must adhere to the following naming conventions or it will result in a failed connection. +==== ++ +[source,yaml] +---- +DB_CONNECTION_ARGS: + autorollback: true + sslmode: verify-ca <1> + sslrootcert: /.postgresql/root.crt <2> + sslcert: /.postgresql/postgresql.crt <3> + sslkey: /.postgresql/postgresql.key <4> + threadlocals: true <5> +DB_URI: postgresql://:@:/?sslmode=verify-full&sslrootcert=/.postgresql/root.crt&sslcert=/.postgresql/postgresql.crt&sslkey=/.postgresql/postgresql.key <6> +---- +<1> Using `verify-ca` ensures that the database connection uses SSL/TLS and verifies the server certificate against a trusted CA. This can work with both trusted CA and self-signed CA certificates. However, this mode does not verify the hostname of the server. For full hostname and certificate verification, use `verify-full`. 
For more information about the configuration options available, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-postgres[PostgreSQL SSL/TLS connection arguments].
+<2> The `root.crt` file contains the root certificate used to verify the SSL/TLS connection with your CloudSQL database. This file is mounted in the `Quay` pod from the Kubernetes secret.
+<3> The `postgresql.crt` file contains the client certificate used to authenticate the connection to your CloudSQL database. This file is mounted in the `Quay` pod from the Kubernetes secret.
+<4> The `postgresql.key` file contains the private key associated with the client certificate. This file is mounted in the `Quay` pod from the Kubernetes secret.
+<5> Enables thread-local connections for the database.
+<6> The URI that accesses your CloudSQL database. Must be appended with the `sslmode` type, your `root.crt`, `postgresql.crt`, and `postgresql.key` files. The SSL/TLS information included in `DB_URI` must match the information provided in `DB_CONNECTION_ARGS`. If you are using CloudSQL, you must include your database username and password in this variable.
+
+. Create the `configBundleSecret` resource by entering the following command:
++
+[source,terminal]
+----
+$ oc create -n -f quay-config-bundle.yaml
+----
++
+.Example output
++
+[source,terminal]
+----
+secret/quay-config-bundle created
+----
+
+. Update the `QuayRegistry` YAML file to reference the `quay-config-bundle` object by entering the following command:
++
+[source,terminal]
+----
+$ oc patch quayregistry -n --type=merge -p '{"spec":{"configBundleSecret":"quay-config-bundle"}}'
+----
++
+.Example output
++
+[source,terminal]
+----
+quayregistry.quay.redhat.com/example-registry patched
+----
+
+. 
Ensure that your `QuayRegistry` YAML file has been updated to use the extra CA certificate `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml +---- ++ +.Example output ++ +[source,terminal] +---- +# ... + configBundleSecret: quay-config-bundle +# ... +---- diff --git a/modules/configuring-clair-updaters.adoc b/modules/configuring-clair-updaters.adoc new file mode 100644 index 000000000..95d4cd30c --- /dev/null +++ b/modules/configuring-clair-updaters.adoc @@ -0,0 +1,594 @@ + +[id="configuring-updaters"] += Configuring updaters + +Updaters can be configured by the `updaters.sets` key in your `clair-config.yaml` file. + +[IMPORTANT] +==== +* If the `sets` field is not populated, it defaults to using all sets. In using all sets, Clair tries to reach the URL or URLs of each updater. If you are using a proxy environment, you must add these URLs to your proxy allowlist. +* If updaters are being run automatically within the matcher process, which is the default setting, the period for running updaters is configured under the matcher's configuration field. +==== + +[id="selecting-updater-sets"] +== Selecting specific updater sets + +Use the following references to select one, or multiple, updaters for your {productname} deployment. + +[discrete] +[id="configuring-clair-multiple-updaters"] +=== Configuring Clair for multiple updaters + +.Multiple specific updaters +[source,yaml] +---- +#... +updaters: + sets: + - alpine + - aws + - osv +#... +---- + +[discrete] +[id="configuring-clair-alpine"] +=== Configuring Clair for Alpine + +.Alpine config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - alpine +#... +---- + +[discrete] +[id="configuring-clair-aws"] +=== Configuring Clair for AWS + +.AWS config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - aws +#... 
+---- + +[discrete] +[id="configuring-clair-debian"] +=== Configuring Clair for Debian + +.Debian config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - debian +#... +---- + +[discrete] +[id="configuring-clair-clair-cvss"] +=== Configuring Clair for Clair CVSS + +.Clair CVSS config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - clair.cvss +#... +---- + +[discrete] +[id="configuring-clair-oracle"] +=== Configuring Clair for Oracle + +.Oracle config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - oracle +#... +---- + +[discrete] +[id="configuring-clair-photon"] +=== Configuring Clair for Photon +.Photon config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - photon +#... +---- + +[discrete] +[id="configuring-clair-suse"] +=== Configuring Clair for SUSE + +.SUSE config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - suse +#... +---- + +[discrete] +[id="configuring-clair-ubuntu"] +=== Configuring Clair for Ubuntu + +.Ubuntu config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - ubuntu +#... +---- + +[discrete] +[id="configuring-clair-osv"] +=== Configuring Clair for OSV + +.OSV config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - osv +#... +---- + +[id="full-rhel-coverage"] +== Selecting updater sets for full {rhel} coverage + +For full coverage of vulnerabilities in {rhel}, you must use the following updater sets: + +* `rhel`. This updater ensures that you have the latest information on the vulnerabilities that affect {rhel-short}. +* `rhcc`. This updater keeps track of vulnerabilities related to Red hat's container images. +* `clair.cvss`. This updater offers a comprehensive view of the severity and risk assessment of vulnerabilities by providing Common Vulnerabilities and Exposures (CVE) scores. +* `osv`. This updater focuses on tracking vulnerabilities in open-source software components. 
This updater is recommended due to how common the use of Java and Go are in {rhel-short} products.
+
+.{rhel-short} updaters example
+[source,yaml]
+----
+#...
+updaters:
+  sets:
+    - rhel
+    - rhcc
+    - clair.cvss
+    - osv
+#...
+----
+
+[id="configuring-specific-updaters"]
+== Advanced updater configuration
+
+In some cases, users might want to configure updaters for specific behavior, for example, if you want to allowlist specific ecosystems for the Open Source Vulnerabilities (OSV) updaters.
+
+Advanced updater configuration might be useful for proxy deployments or air gapped deployments. Configuration for specific updaters in these scenarios can be passed by putting a key underneath the `config` environment variable of the `updaters` object. Users should examine their Clair logs to double-check names.
+
+The following YAML snippets detail the various settings available to some Clair updaters.
+
+[IMPORTANT]
+====
+For most users, advanced updater configuration is unnecessary.
+====
+
+[discrete]
+=== Configuring the alpine updater
+
+[source,yaml]
+----
+#...
+updaters:
+  sets:
+    - alpine
+  config:
+    alpine:
+      url: https://secdb.alpinelinux.org/
+#...
+----
+
+[discrete]
+=== Configuring the debian updater
+
+[source,yaml]
+----
+#...
+updaters:
+  sets:
+    - debian
+  config:
+    debian:
+      mirror_url: https://deb.debian.org/
+      json_url: https://security-tracker.debian.org/tracker/data/json
+#...
+----
+
+[discrete]
+=== Configuring the clair.cvss updater
+
+[source,yaml]
+----
+#...
+updaters:
+  config:
+    clair.cvss:
+      url: https://nvd.nist.gov/feeds/json/cve/1.1/
+#...
+----
+
+[discrete]
+=== Configuring the oracle updater
+
+[source,yaml]
+----
+#...
+updaters:
+  sets:
+    - oracle
+  config:
+    oracle-2023-updater:
+      url:
+        - https://linux.oracle.com/security/oval/com.oracle.elsa-2023.xml.bz2
+    oracle-2022-updater:
+      url:
+        - https://linux.oracle.com/security/oval/com.oracle.elsa-2022.xml.bz2
+#...
+---- + +[discrete] +=== Configuring the photon updater + +[source,yaml] +---- +#... +updaters: + sets: + - photon + config: + photon: + url: https://packages.vmware.com/photon/photon_oval_definitions/ +#... +---- + +[discrete] +=== Configuring the rhel updater + +[source,yaml] +---- +#... +updaters: + sets: + - rhel + config: + rhel: + url: https://access.redhat.com/security/data/oval/v2/PULP_MANIFEST + ignore_unpatched: true <1> +#... +---- +<1> Boolean. Whether to include information about vulnerabilities that do not have corresponding patches or updates available. + +[discrete] +=== Configuring the rhcc updater + +[source,yaml] +---- +#... +updaters: + sets: + - rhcc + config: + rhcc: + url: https://access.redhat.com/security/data/metrics/cvemap.xml +#... +---- + +[discrete] +=== Configuring the suse updater + +[source,yaml] +---- +#... +updaters: + sets: + - suse + config: + suse: + url: https://support.novell.com/security/oval/ +#... +---- + +[discrete] +=== Configuring the ubuntu updater + +[source,yaml] +---- +#... +updaters: + config: + ubuntu: + url: https://api.launchpad.net/1.0/ + name: ubuntu + force: <1> + - name: focal <2> + version: 20.04 <3> +#... +---- +<1> Used to force the inclusion of specific distribution and version details in the resulting UpdaterSet, regardless of their status in the API response. Useful when you want to ensure that particular distributions and versions are consistently included in your updater configuration. +<2> Specifies the distribution name that you want to force to be included in the UpdaterSet. +<3> Specifies the version of the distribution you want to force into the UpdaterSet. + +[discrete] +=== Configuring the osv updater + +[source,yaml] +---- +#... +updaters: + sets: + - osv + config: + osv: + url: https://osv-vulnerabilities.storage.googleapis.com/ + allowlist: <1> + - npm + - pypi +#... +---- +<1> The list of ecosystems to allow. When left unset, all ecosystems are allowed. Must be lowercase. 
For a list of supported ecosystems, see the documentation for link:https://ossf.github.io/osv-schema/#affectedpackage-field[defined ecosystems]. + +[id="disabling-clair-updater-component-managed-db"] +== Disabling the Clair Updater component + +In some scenarios, users might want to disable the Clair updater component. Disabling updaters is required when running {productname} in a disconnected environment. + +In the following example, Clair updaters are disabled: + +[source,yaml] +---- +#... +matcher: + disable_updaters: true +#... +---- + + +//// + + +The following sections outline how one might configure specific updaters in Clair when it is being used in a proxy environment. + +[IMPORTANT] +==== +These are examples, and depending on how your proxy server is configured to route requests might impact how your `clair-config.yaml` file structure is determined. +==== + +[discrete] +=== Configuring the alpine updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - apline + config: + alpine: + url: https:///secdb/alpine/ <1> +---- +<1> Based on the `alpine` updater URL `\https://secdb.alpinelinux.org/`. + +[discrete] +=== Configuring the aws updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - aws + config: + aws: + url: https:///updates/x86_64/mirror.list <1> + url: https:///core/latest/x86_64/mirror.list <2> + url: https:///al2023/core/mirrors/latest/x86_64/mirror.list <3> +---- +<1> Based on the `aws` updater URL `\http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list`. +<2> Based on the `aws` updater URL `\https://cdn.amazonlinux.com/2/core/latest/x86_64/mirror.list`. +<3> Based on the `aws` updater URL `\https://cdn.amazonlinux.com/al2023/core/mirrors/latest/x86_64/mirror.list`. + +[discrete] +=== Configuring the debian updater for proxy environments + +[source,yaml] +---- +#... 
+updaters: + sets: + - debian + config: + debian: + mirror_url: https:///debian-archive/ <1> + json_url: https:///debian-json/ <2> +---- +<1> Based on the `debian` updater URL `\https://deb.debian.org/`. +<2> Based on the `debian` updater URL `\https://security-tracker.debian.org/tracker/data/json`. + +[discrete] +=== Configuring the clair.cvss updater for proxy environments + +[source,yaml] +---- +#... +updaters: + config: + clair.cvss: + url: https:///feeds/json/cve/1.1/ <1> +---- +<1> Based on the `clair.cvss` updater URL `\https://nvd.nist.gov/feeds/json/cve/1.1/`. + +[discrete] +=== Configuring the oracle updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - oracle + config: + oracle: + url: https:///security/oval/com.oracle.elsa-*.xml.bz2 <1> +#... +---- +<1> Based on the `oracle` updater URL `\https://linux.oracle.com/security/oval/com.oracle.elsa-*.xml.bz2`. + +[discrete] +=== Configuring the photon updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - photon + config: + photon: + url: https:///photon/photon_oval_definitions/ <1> +#... +---- +<1> Based on the `photon` updater URL `\https://packages.vmware.com/photon/photon_oval_definitions/`. + + +[discrete] +=== Configuring the rhel updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - rhel + config: + rhel: + url: https:///mirror/oval/PULP_MANIFEST <1> + url: https:///security/cve/ <2> + ignore_unpatched: true <3> +#... +---- +<1> Based on the `rhel` updater URL `\https://access.redhat.com/security/data/oval/v2/PULP_MANIFEST`. +<2> Based on the `rhel` updater URL `\https://access.redhat.com/security/cve/`. +<3> Boolean. Whether to include information about vulnerabilities that do not have corresponding patches or updates available. + +[discrete] +=== Configuring the rhcc updater for proxy environments + +[source,yaml] +---- +#... 
+updaters: + sets: + - rhcc + config: + rhcc: + url: https:///security/data/metrics/cvemap.xml <1> +#... +---- +<1> Based on the `rhcc` updater URL `\https://access.redhat.com/security/data/metrics/cvemap.xml`. + +[discrete] +=== Configuring the suse updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - suse + config: + suse: + url: https:///security/oval/ <1> +#... +---- +<1> Based on the `suse` updater URL `\https://support.novell.com/security/oval/`. + +[discrete] +=== Configuring the ubuntu updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - ubuntu + config: + ubuntu: + - url: https:///ubuntu-cve-oval/ <1> + - url: https:///ubuntu-launchpad-api/ <2> + name: ubuntu + force: <3> + - name: focal <4> + version: 20.04 <5> +#... +---- +<1> Based on the `ubuntu` updater URL `\https://security-metadata.canonical.com/oval/com.ubuntu.*.cve.oval.xml`. +<2> Based on the `ubuntu` updater URL `\https://api.launchpad.net/1.0/`. +<3> Used to force the inclusion of specific distribution and version details in the resulting UpdaterSet, regardless of their status in the API response. Useful when you want to ensure that particular distributions and versions are consistently included in your updater configuration. +<4> Specifies the distribution name that you want to force to be included in the UpdaterSet. In this case, it's set to `focal` to specify the Ubuntu distribution with the name `focal`. +<5> Specifies the version of the distribution you want to force into the UpdaterSet. Here, it's set to `20.04` to indicate that the specific version of the `focal` distribution to be included is `20.04`. + +[discrete] +=== Configuring the osv updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - osv + config: + osv: + url: https:///osv-vulnerabilities/ <1> + allowlist: <2> + - npm + - PyPI +#... +---- +<1> Based on the `osv` updater URL `\https://osv-vulnerabilities.storage.googleapis.com/`. 
+<2> The list of ecosystems to allow. When left unset, all ecosystems are allowed. For a list of supported ecosystems, see the documentation for link:https://ossf.github.io/osv-schema/#affectedpackage-field[defined ecosystems]. + +[id="disabling-clair-updater-component-managed-db"] +== Disabling the Clair Updater component + +In some scenarios, users might want to disable the Clair updater component. Disabling updaters is required when running {productname} in a disconnected environment. + +In the following example, Clair updaters are disabled: + +[source,yaml] +---- +#... +matcher: + disable_updaters: true +#... +---- +//// \ No newline at end of file diff --git a/modules/configuring-custom-clair-database-managed.adoc b/modules/configuring-custom-clair-database-managed.adoc index 28bb1d11e..245070fb9 100644 --- a/modules/configuring-custom-clair-database-managed.adoc +++ b/modules/configuring-custom-clair-database-managed.adoc @@ -6,7 +6,7 @@ [id="configuring-custom-clair-database-managed"] = Configuring a custom Clair database with a managed Clair configuration -The {productname} Operator for {ocp} allows users to provide their own Clair database. +{productname} on {ocp} allows users to provide their own Clair database. Use the following procedure to create a custom Clair database. @@ -42,7 +42,7 @@ notifier: [NOTE] ==== * The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`. -* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. 
+* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. ==== . Add the `clair-config.yaml` file to your bundle secret, for example: diff --git a/modules/configuring-custom-clair-database.adoc b/modules/configuring-custom-clair-database.adoc index 8395574b5..b082934fb 100644 --- a/modules/configuring-custom-clair-database.adoc +++ b/modules/configuring-custom-clair-database.adoc @@ -6,13 +6,13 @@ [id="configuring-custom-clair-database"] = Configuring a custom Clair database with an unmanaged Clair database -The {productname} Operator for {ocp} allows users to provide their own Clair database. +{productname} on {ocp} allows users to provide their own Clair database. Use the following procedure to create a custom Clair database. [NOTE] ==== -The following procedure sets up Clair with SSL/TLS certifications. To view a similar procedure that does not set up Clair with SSL/TSL certifications, see "Configuring a custom Clair database with a managed Clair configuration". +The following procedure sets up Clair with SSL/TLS certifications. To view a similar procedure that does not set up Clair with SSL/TLS certifications, see "Configuring a custom Clair database with a managed Clair configuration". ==== .Procedure @@ -47,7 +47,7 @@ notifier: [NOTE] ==== * The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`. -* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. 
+* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. ==== . Add the `clair-config.yaml` file to your bundle secret, for example: @@ -63,8 +63,8 @@ data: config.yaml: clair-config.yaml: extra_ca_cert_: - clair-ssl.crt: >- - clair-ssl.key: >- + ssl.crt: + ssl.key: ---- + [NOTE] diff --git a/modules/configuring-oci-media-types.adoc b/modules/configuring-oci-media-types.adoc new file mode 100644 index 000000000..dc16a1c62 --- /dev/null +++ b/modules/configuring-oci-media-types.adoc @@ -0,0 +1,106 @@ +// Document included in the following assemblies: + +// unused + +:_content-type: REFERENCE +[id="supported-oci-media-types"] += Configuring artifact types + +As a {productname} administrator, you can configure Open Container Initiative (OCI) artifact types and other experimental artifact types through the `FEATURE_GENERAL_OCI_SUPPORT` and `ALLOWED_OCI_ARTIFACT_TYPES` configuration fields. 
+ +The following Open Container Initiative (OCI) artifact types are built into {productname} by default and are enabled through the *FEATURE_GENERAL_OCI_SUPPORT* configuration field: + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Media Type | Supported content types + +| *Helm* | `application/vnd.cncf.helm.config.v1+json` | `application/tar+gzip`, `application/vnd.cncf.helm.chart.content.v1.tar+gzip` + +| *Cosign* | `application/vnd.oci.image.config.v1+json` | `application/vnd.dev.cosign.simplesigning.v1+json`, `application/vnd.dsse.envelope.v1+json` + +| *SPDX* | `application/vnd.oci.image.config.v1+json` | `text/spdx`, `text/spdx+xml`, `text/spdx+json` + +| *Syft* | `application/vnd.oci.image.config.v1+json` | `application/vnd.syft+json` + +| *CycloneDX* | `application/vnd.oci.image.config.v1+json` | `application/vnd.cyclonedx`, `application/vnd.cyclonedx+xml`, `application/vnd.cyclonedx+json` + +| *In-toto* | `application/vnd.oci.image.config.v1+json` | `application/vnd.in-toto+json` + +| *Unknown* | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego` | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego`, `application/vnd.cncf.openpolicyagent.data.layer.v1+json` + +|=== + +Additionally, {productname} uses the _ZStandard_, or _zstd_, to reduce the size of container images or other related artifacts. Zstd helps optimize storage and improve transfer speeds when working with container images. + +Use the following procedures to configure support for the default and experimental OCI media types. + +[id="configuring-oci-media-types-proc"] +== Configuring OCI artifact types + +Use the following procedure to configure artifact types that are embedded in {productname} by default. + +.Prerequisites + +* You have {productname} administrator privileges. + +.Procedure + +* In your {productname} `config.yaml` file, enable support for general OCI support by setting the `FEATURE_GENERAL_OCI_SUPPORT` field to `true`. 
For example: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +---- ++ +With `FEATURE_GENERAL_OCI_SUPPORT` set to true, {productname} users can now push and pull charts of the default artifact types to their {productname} deployment. + +[id="configuring-additional-oci-media-types-proc"] +== Configuring additional artifact types + +Use the following procedure to configure additional, and specific, artifact types for your {productname} deployment. + +[NOTE] +==== +Using the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field, you can restrict which artifact types are accepted by your {productname} registry. If you want your {productname} deployment to accept all artifact types, see "Configuring unknown media types". +==== + +.Prerequistes + +* You have {productname} administrator privileges. + +.Procedure + +* Add the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field, along with the configuration and layer types: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +ALLOWED_OCI_ARTIFACT_TYPES: + : + - + - + + : + - + - +---- ++ +For example, you can add Singularity Image Format (SIF) support by adding the following to your `config.yaml` file: ++ +[source,yaml] +---- +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.dev.cosign.simplesigning.v1+json + application/vnd.cncf.helm.config.v1+json: + - application/tar+gzip + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +---- ++ +[NOTE] +==== +When adding OCI artifact types that are not configured by default, {productname} administrators will also need to manually add support for Cosign and Helm if desired. +==== ++ +Now, users can tag SIF images for their {productname} registry. 
\ No newline at end of file diff --git a/modules/configuring-oidc-authentication.adoc b/modules/configuring-oidc-authentication.adoc new file mode 100644 index 000000000..9ebecb2b7 --- /dev/null +++ b/modules/configuring-oidc-authentication.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="configuring-oidc-authentication"] += Configuring OIDC for {productname} + +Configuring OpenID Connect (OIDC) for {productname} can provide several benefits to your deployment. For example, OIDC allows users to authenticate to {productname} using their existing credentials from an OIDC provider, such as link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/7.0[Red Hat Single Sign-On], Google, Github, Microsoft, or others. Other benefits of OIDC include centralized user management, enhanced security, and single sign-on (SSO). Overall, OIDC configuration can simplify user authentication and management, enhance security, and provide a seamless user experience for {productname} users. + +The following procedures show you how to configure Microsoft Entra ID on a standalone deployment of {productname}, and how to configure Red Hat Single Sign-On on an Operator-based deployment of {productname}. These procedures are interchangeable depending on your deployment type. + +[NOTE] +==== +By following these procedures, you will be able to add any OIDC provider to {productname}, regardless of which identity provider you choose to use. +==== + +[id="configuring-entra-oidc"] +== Configuring Microsoft Entra ID OIDC on a standalone deployment of {productname} + +By integrating Microsoft Entra ID authentication with {productname}, your organization can take advantage of the centralized user management and security features offered by Microsoft Entra ID. 
Some features include the ability to manage user access to {productname} repositories based on their Microsoft Entra ID roles and permissions, and the ability to enable multi-factor authentication and other security features provided by Microsoft Entra ID.
+
+Azure Active Directory (Microsoft Entra ID) authentication for {productname} allows users to authenticate and access {productname} using their Microsoft Entra ID credentials.
+
+Use the following procedure to configure Microsoft Entra ID by updating the {productname} `config.yaml` file directly.
+
+.Procedure
+
+[NOTE]
+====
+* Using the following procedure, you can add any OIDC provider to {productname}, regardless of which identity provider is being added.
+* If your system has a firewall in use, or proxy enabled, you must whitelist all Azure API endpoints for each OAuth application that is created. Otherwise, the following error is returned: `x509: certificate signed by unknown authority`.
+====
+
+. Use the following reference and update your `config.yaml` file with your desired OIDC provider's credentials:
++
+[source,yaml]
+----
+AUTHENTICATION_TYPE: OIDC
+# ...
+AZURE_LOGIN_CONFIG: <1>
+  CLIENT_ID: <2>
+  CLIENT_SECRET: <3>
+  OIDC_SERVER: <4>
+  SERVICE_NAME: Microsoft Entra ID <5>
+  VERIFIED_EMAIL_CLAIM_NAME: <6>
+# ...
+----
+<1> The parent key that holds the OIDC configuration settings. In this example, the parent key used is `AZURE_LOGIN_CONFIG`, however, the string `AZURE` can be replaced with any arbitrary string based on your specific needs, for example `ABC123`. However, the following strings are not accepted: `GOOGLE`, `GITHUB`. These strings are reserved for their respective identity platforms and require a specific `config.yaml` entry contingent upon which platform you are using.
+<2> The client ID of the application that is being registered with the identity provider.
+<3> The client secret of the application that is being registered with the identity provider. 
+<4> The address of the OIDC server that is being used for authentication. In this example, you must use `sts.windows.net` as the issuer identifier. Using `https://login.microsoftonline.com` results in the following error: `Could not create provider for AzureAD. Error: oidc: issuer did not match the issuer returned by provider, expected "https://login.microsoftonline.com/73f2e714-xxxx-xxxx-xxxx-dffe1df8a5d5" got "https://sts.windows.net/73f2e714-xxxx-xxxx-xxxx-dffe1df8a5d5/"`. +<5> The name of the service that is being authenticated. +<6> The name of the claim that is used to verify the email address of the user. + +. Proper configuration of Microsoft Entra ID results in three redirects with the following format: ++ +* `\https://QUAY_HOSTNAME/oauth2//callback` +* `\https://QUAY_HOSTNAME/oauth2//callback/attach` +* `\https://QUAY_HOSTNAME/oauth2//callback/cli` + +. Restart your {productname} deployment. \ No newline at end of file diff --git a/modules/configuring-openshift-tls-component-builds.adoc b/modules/configuring-openshift-tls-component-builds.adoc new file mode 100644 index 000000000..8f474bd93 --- /dev/null +++ b/modules/configuring-openshift-tls-component-builds.adoc @@ -0,0 +1,35 @@ +:_content-type: CONCEPT +[id="configuring-openshift-tls-component-builds"] += Configuring the {ocp} TLS component for builds + +The `tls` component of the `QuayRegistry` custom resource definition (CRD) allows you to control whether SSL/TLS are managed by the {productname} Operator, or self managed. In its current state, {productname} does not support the _builds_ feature, or the _builder_ workers, when the `tls` component is managed by the {productname} Operator. + +When setting the `tls` component to `unmanaged`, you must supply your own `ssl.cert` and `ssl.key` files. 
Additionally, if you want your cluster to support _builders_, or the worker nodes that are responsible for building images, you must add both the `Quay` route and the `builder` route name to the SAN list in the certificate. Alternatively, however, you could use a wildcard. + +The following procedure shows you how to add the _builder_ route. + +.Prerequisites + +* You have set the `tls` component to `unmanaged` and uploaded custom SSL/TLS certificates to the {productname} Operator. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}]. + +.Procedure + +* In the configuration file that defines your SSL/TLS certificate parameters, for example, `openssl.cnf`, add the following information to the certificate's Subject Alternative Name (SAN) field. For example: ++ +[source,yaml] +---- +# ... +[alt_names] +-quay-builder-.:443 +# ... +---- ++ +For example: ++ +[source,yaml] +---- +# ... +[alt_names] +example-registry-quay-builder-quay-enterprise.apps.cluster-new.gcp.quaydev.org:443 +# ... +---- \ No newline at end of file diff --git a/modules/configuring-port-mapping.adoc b/modules/configuring-port-mapping.adoc new file mode 100644 index 000000000..7331a5f26 --- /dev/null +++ b/modules/configuring-port-mapping.adoc @@ -0,0 +1,38 @@ +:_content-type: PROCEDURE +[id="configuring-port-mapping"] += Configuring port mapping for {productname} + +You can use port mappings to expose ports on the host and then use these ports in combination with the host IP address or host name to navigate to the {productname} endpoint. + +.Procedure + +. 
Enter the following command to obtain your static IP address for your host system: ++ +[source,terminal] +---- +$ ip a +---- ++ +.Example output ++ +[source,terminal] +---- +--- + link/ether 6c:6a:77:eb:09:f1 brd ff:ff:ff:ff:ff:ff + inet 192.168.1.132/24 brd 192.168.1.255 scope global dynamic noprefixroute wlp82s0 +--- +---- + +. Add the IP address and a local hostname, for example, `quay-server.example.com` to your `/etc/hosts` file that will be used to reach the {productname} endpoint. You can confirm that the IP address and hostname have been added to the `/etc/hosts` file by entering the following command: ++ +[source,terminal] +---- +$ cat /etc/hosts +---- ++ +.Example output ++ +[source,terminal] +---- +192.168.1.138 quay-server.example.com +---- \ No newline at end of file diff --git a/modules/configuring-quay-ocp-aws-sts.adoc b/modules/configuring-quay-ocp-aws-sts.adoc new file mode 100644 index 000000000..935aca749 --- /dev/null +++ b/modules/configuring-quay-ocp-aws-sts.adoc @@ -0,0 +1,83 @@ +[id="configuring-quay-ocp-aws-sts"] += Configuring {productname-ocp} to use AWS STS + +Use the following procedure to edit your {productname-ocp} `config.yaml` file to use AWS STS. + +[NOTE] +==== +You can also edit and re-deploy your {productname-ocp} `config.yaml` file directly instead of using the {ocp} UI. +==== + +.Prerequisites + +* You have configured a Role ARN. +* You have generated a User Access Key. +* You have generated a User Secret Key. + +.Procedure + +. On the *Home* page of your {ocp} deployment, click *Operators* -> *Installed Operators*. + +. Click *Red Hat Quay*. + +. Click *Quay Registry* and then the name of your {productname} registry. + +. Under *Config Bundle Secret*, click the name of your registry configuration bundle, for example, *quay-registry-config-bundle-qet56*. + +. On the configuration bundle page, click *Actions* to reveal a drop-down menu. Then click *Edit Secret*. + +. 
Update the `DISTRIBUTED_STORAGE_CONFIG` fields of your `config.yaml` file with the following information: ++ +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_CONFIG: + default: + - STSS3Storage + - sts_role_arn: <1> + s3_bucket: <2> + storage_path: <3> + s3_region: <4> + sts_user_access_key: <5> + sts_user_secret_key: <6> +# ... +---- +<1> The unique Amazon Resource Name (ARN) required when configuring AWS STS. +<2> The name of your s3 bucket. +<3> The storage path for data. Usually `/datastorage`. +<4> Optional. The Amazon Web Services region. Defaults to `us-east-1`. +<5> The generated AWS S3 user access key required when configuring AWS STS. +<6> The generated AWS S3 user secret key required when configuring AWS STS. + +. Click *Save*. + +.Verification + +. Tag a sample image, for example, `busybox`, that will be pushed to the repository. For example: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test +---- + +. Push the sample image by running the following command: ++ +[source,terminal] +---- +$ podman push //busybox:test +---- + +. Verify that the push was successful by navigating to the Organization that you pushed the image to in your {productname} registry -> *Tags*. + +. Navigate to the Amazon Web Services (AWS) console and locate your s3 bucket. + +. Click the name of your s3 bucket. + +. On the *Objects* page, click *datastorage/*. + +. On the *datastorage/* page, the following resources should be seen: ++ +* *sha256/* +* *uploads/* ++ +These resources indicate that the push was successful, and that AWS STS is properly configured. 
diff --git a/modules/configuring-quay-standalone-aws-sts.adoc b/modules/configuring-quay-standalone-aws-sts.adoc new file mode 100644 index 000000000..c00c4b556 --- /dev/null +++ b/modules/configuring-quay-standalone-aws-sts.adoc @@ -0,0 +1,62 @@ +[id="configuring-quay-standalone-aws-sts"] += Configuring {productname} to use AWS STS + +Use the following procedure to edit your {productname} `config.yaml` file to use AWS STS. + +.Procedure + +. Update your `config.yaml` file for {productname} to include the following information: ++ +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_CONFIG: + default: + - STSS3Storage + - sts_role_arn: <1> + s3_bucket: <2> + storage_path: <3> + s3_region: <4> + sts_user_access_key: <5> + sts_user_secret_key: <6> +# ... +---- +<1> The unique Amazon Resource Name (ARN) required when configuring AWS STS +<2> The name of your s3 bucket. +<3> The storage path for data. Usually `/datastorage`. +<4> Optional. The Amazon Web Services region. Defaults to `us-east-1`. +<5> The generated AWS S3 user access key required when configuring AWS STS. +<6> The generated AWS S3 user secret key required when configuring AWS STS. + +. Restart your {productname} deployment. + +.Verification + +. Tag a sample image, for example, `busybox`, that will be pushed to the repository. For example: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test +---- + +. Push the sample image by running the following command: ++ +[source,terminal] +---- +$ podman push //busybox:test +---- + +. Verify that the push was successful by navigating to the Organization that you pushed the image to in your {productname} registry -> *Tags*. + +. Navigate to the Amazon Web Services (AWS) console and locate your s3 bucket. + +. Click the name of your s3 bucket. + +. On the *Objects* page, click *datastorage/*. + +. 
On the *datastorage/* page, the following resources should be seen: ++ +* *sha256/* +* *uploads/* ++ +These resources indicate that the push was successful, and that AWS STS is properly configured. \ No newline at end of file diff --git a/modules/configuring-red-hat-sso.adoc b/modules/configuring-red-hat-sso.adoc new file mode 100644 index 000000000..a9d191acf --- /dev/null +++ b/modules/configuring-red-hat-sso.adoc @@ -0,0 +1,95 @@ +[id="configuring-red-hat-sso-oidc"] += Configuring Red Hat Single Sign-On for {productname} + +Based on the Keycloak project, Red Hat Single Sign-On (RH-SSO) is an open source identity and access management (IAM) solution provided by Red Hat. RH-SSO allows organizations to manage user identities, secure applications, and enforce access control policies across their systems and applications. It also provides a unified authentication and authorization framework, which allows users to log in one time and gain access to multiple applications and resources without needing to re-authenticate. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/7.0[Red Hat Single Sign-On]. + +By configuring Red Hat Single Sign-On on {productname}, you can create a seamless authentication integration between {productname} and other application platforms like {ocp}. + +[id="configuring-red-hat-sso-using-config-tool"] +== Configuring the Red Hat Single Sign-On Operator for use with the {productname} Operator + +Use the following procedure to configure Red Hat Single Sign-On for the {productname} Operator on {ocp}. + +.Prerequisites + +* You have set up the Red Hat Single Sign-On Operator. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/7.6/html-single/server_installation_and_configuration_guide/index#operator[Red Hat Single Sign-On Operator]. 
+* You have configured link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploying_the_red_hat_quay_operator_on_openshift_container_platform/operator-config-cli#operator-custom-ssl-certs-config-bundle[SSL/TLS for your {productname-ocp} deployment] _and_ for Red Hat Single Sign-On. +* You have generated a single Certificate Authority (CA) and uploaded it to your Red Hat Single Sign-On Operator _and_ to your {productname} configuration. + +.Procedure + +. Navigate to the Red Hat Single Sign-On *Admin Console*. + +.. On the {ocp} *Web Console*, navigate to *Network* -> *Route*. + +.. Select the *Red Hat Single Sign-On* project from the drop-down list. + +.. Find the Red Hat Single Sign-On *Admin Console* in the *Routes* table. + +. Select the Realm that you will use to configure {productname}. + +. Click *Clients* under the *Configure* section of the navigation panel, and then click the *Create* button to add a new OIDC for {productname}. + +. Enter the following information. ++ +* **Client ID:** `quay-enterprise` +* **Client Protocol:** `openid-connect` +* **Root URL:** `\https:///` + +. Click *Save*. This results in a redirect to the *Clients* setting panel. + +. Navigate to *Access Type* and select *Confidential*. + +. Navigate to *Valid Redirect URIs*. You must provide three redirect URIs. The value should be the fully qualified domain name of the {productname} registry appended with `/oauth2/redhatsso/callback`. For example: ++ +* `\https:///oauth2/redhatsso/callback` +* `\https:///oauth2/redhatsso/callback/attach` +* `\https:///oauth2/redhatsso/callback/cli` + +. Click *Save* and navigate to the new *Credentials* setting. + +. Copy the value of the Secret. + +[id="configuring-quay-operator-use-redhat-sso"] +=== Configuring the {productname} Operator to use Red Hat Single Sign-On + +Use the following procedure to configure Red Hat Single Sign-On with the {productname} Operator. 
+ +.Prerequisites + +* You have set up the Red Hat Single Sign-On Operator. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/7.6/html-single/server_installation_and_configuration_guide/index#operator[Red Hat Single Sign-On Operator]. +* You have configured link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploying_the_red_hat_quay_operator_on_openshift_container_platform/operator-config-cli#operator-custom-ssl-certs-config-bundle[SSL/TLS for your {productname-ocp} deployment] _and_ for Red Hat Single Sign-On. +* You have generated a single Certificate Authority (CA) and uploaded it to your Red Hat Single Sign-On Operator _and_ to your {productname} configuration. + +.Procedure + +. Edit your {productname} `config.yaml` file by navigating to *Operators* -> *Installed Operators* -> *Red Hat Quay* -> *Quay Registry* -> *Config Bundle Secret*. Then, click *Actions* -> *Edit Secret*. Alternatively, you can update the `config.yaml` file locally. + +. Add the following information to your {productname-ocp} `config.yaml` file: ++ +[source,yaml] +---- +# ... +RHSSO_LOGIN_CONFIG: <1> + CLIENT_ID: <2> + CLIENT_SECRET: <3> + OIDC_SERVER: <4> + SERVICE_NAME: <5> + SERVICE_ICON: <6> + VERIFIED_EMAIL_CLAIM_NAME: <7> + PREFERRED_USERNAME_CLAIM_NAME: <8> + LOGIN_SCOPES: <9> + - 'openid' +# ... +---- +<1> The parent key that holds the OIDC configuration settings. In this example, the parent key used is `RHSSO_LOGIN_CONFIG`, however, the string `RHSSO` can be replaced with any arbitrary string based on your specific needs, for example `ABC123`. However, the following strings are not accepted: `GOOGLE`, `GITHUB`. These strings are reserved for their respective identity platforms and require a specific `config.yaml` entry contingent upon which platform you are using. +<2> The client ID of the application that is being registered with the identity provider, for example, `quay-enterprise`. 
+<3> The Client Secret from the *Credentials* tab of the `quay-enterprise` OIDC client settings. +<4> The fully qualified domain name (FQDN) of the Red Hat Single Sign-On instance, appended with `/auth/realms/` and the Realm name. You must include the forward slash at the end, for example, `\https://sso-redhat.example.com//auth/realms//`. +<5> The name that is displayed on the {productname} login page, for example, `Red Hat Single Sign-On`. +<6> Changes the icon on the login screen. For example, `/static/img/RedHat.svg`. +<7> The name of the claim that is used to verify the email address of the user. +<8> The name of the claim that is used to verify the username of the user. +<9> The scopes to send to the OIDC provider when performing the login flow, for example, `openid`. + +. Restart your {productname-ocp} deployment with Red Hat Single Sign-On enabled. \ No newline at end of file diff --git a/modules/configuring-resources-managed-components.adoc b/modules/configuring-resources-managed-components.adoc new file mode 100644 index 000000000..097afe83a --- /dev/null +++ b/modules/configuring-resources-managed-components.adoc @@ -0,0 +1,139 @@ +:_content-type: PROCEDURE +[id="configuring-resources-managed-components"] += Configuring resources for managed components on {ocp} + +You can manually adjust the resources on {productname-ocp} for the following components that have running pods: + +* `quay` +* `clair` +* `mirroring` +* `clairpostgres` +* `postgres` + +This feature allows users to run smaller test clusters, or to request more resources upfront in order to avoid partially degraded `Quay` pods. Limitations and requests can be set in accordance with link:https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes[Kubernetes resource units]. + +The following components should not be set lower than their minimum requirements. 
This can cause issues with your deployment and, in some cases, result in failure of the pod's deployment. + +* `quay`: Minimum of 6 GB, 2vCPUs +* `clair`: Recommended of 2 GB memory, 2 vCPUs +* `clairpostgres`: Minimum of 200 MB + +You can configure resource requests on the {ocp} UI, or by directly by updating the `QuayRegistry` YAML. + +[IMPORTANT] +==== +The default values set for these components are the suggested values. Setting resource requests too high or too low might lead to inefficient resource utilization, or performance degradation, respectively. +==== + +[id="configuring-resources-ocp-ui"] +== Configuring resource requests by using the {ocp} UI + +Use the following procedure to configure resources by using the {ocp} UI. + +.Procedure + +. On the {ocp} developer console, click *Operators* -> *Installed Operators* -> *Red Hat Quay*. + +. Click *QuayRegistry*. + +. Click the name of your registry, for example, *example-registry*. + +. Click *YAML*. + +. In the `spec.components` field, you can override the resource of the `quay`, `clair`, `mirroring` `clairpostgres`, and `postgres` resources by setting values for the `.overrides.resources.limits` and the `overrides.resources.requests` fields. 
For example: ++ +[source,yaml] +---- +spec: + components: + - kind: clair + managed: true + overrides: + resources: + limits: + cpu: "5" # Limiting to 5 CPU (equivalent to 5000m or 5000 millicpu) + memory: "18Gi" # Limiting to 18 Gibibytes of memory + requests: + cpu: "4" # Requesting 4 CPU + memory: "4Gi" # Requesting 4 Gibibytes of memory + - kind: postgres + managed: true + overrides: + resources: + limits: {} <1> + requests: + cpu: "700m" # Requesting 700 millicpu or 0.7 CPU + memory: "4Gi" # Requesting 4 Gibibytes of memory + - kind: mirror + managed: true + overrides: + resources: + limits: <2> + requests: + cpu: "800m" # Requesting 800 millicpu or 0.8 CPU + memory: "1Gi" # Requesting 1 Gibibyte of memory + - kind: quay + managed: true + overrides: + resources: + limits: + cpu: "4" # Limiting to 4 CPU + memory: "10Gi" # Limiting to 10 Gibibytes of memory + requests: + cpu: "4" # Requesting 4 CPU + memory: "10Gi" # Requesting 10 Gibi of memory + - kind: clairpostgres + managed: true + overrides: + resources: + limits: + cpu: "800m" # Limiting to 800 millicpu or 0.8 CPU + memory: "3Gi" # Limiting to 3 Gibibytes of memory + requests: {} +---- +<1> Setting the `limits` or `requests` fields to `{}` uses the default values for these resources. +<2> Leaving the `limits` or `requests` field empty puts no limitations on these resources. + +[id="configuring-resources-ocp-yaml"] +== Configuring resource requests by editing the QuayRegistry YAML + +You can re-configure {productname} to configure resource requests after you have already deployed a registry. This can be done by editing the `QuayRegistry` YAML file directly and then re-deploying the registry. + +.Procedure + +. Optional: If you do not have a local copy of the `QuayRegistry` YAML file, enter the following command to obtain it: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml > quayregistry.yaml +---- + +. 
Open the `quayregistry.yaml` created from Step 1 of this procedure and make the desired changes. For example: ++ +[source,yaml] +---- + - kind: quay + managed: true + overrides: + resources: + limits: {} + requests: + cpu: "0.7" # Requesting 0.7 CPU (equivalent to 700m or 700 millicpu) + memory: "512Mi" # Requesting 512 Mebibytes of memory +---- + +. Save the changes. + +. Apply the {productname} registry using the updated configurations by running the following command: ++ +[source,terminal] +---- +$ oc replace -f quayregistry.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry replaced +---- \ No newline at end of file diff --git a/modules/configuring-ssl-tls.adoc b/modules/configuring-ssl-tls.adoc new file mode 100644 index 000000000..58b4765f9 --- /dev/null +++ b/modules/configuring-ssl-tls.adoc @@ -0,0 +1,5 @@ +:_content-type: PROCEDURE +[id="configuring-ssl-tls"] += Configuring SSL/TLS for standalone {productname} deployments + +For standalone {productname} deployments, SSL/TLS certificates must be configured by using the command-line interface and by updating your `config.yaml` file manually. \ No newline at end of file diff --git a/modules/connecting-s3-timeout.adoc b/modules/connecting-s3-timeout.adoc new file mode 100644 index 000000000..2865995f4 --- /dev/null +++ b/modules/connecting-s3-timeout.adoc @@ -0,0 +1,32 @@ +:_content-type: PROCEDURE +[id="connecting-s3-timeout"] += Connection to AWS s3 bucket errors out + +In some cases, {productname} attempts to connect to the s3 bucket that is described in a `config.yaml` file and errors out. Running {productname} in debug might reveal the following error: `gunicorn-registry stdout | 2022-10-21 14:38:36,892 [253] [DEBUG] [urllib3.connectionpool] https://s3.ap-south-1.amazonaws.com:443 "POST /quay-bucket-1/storage/quayregistrystorage/uploads/23cd6e62-264c-48e4-94a7-21061b0e4ef1?uploads HTTP/1.1" 400 None`. 
+ +This error occurs because the URL format of a bucket is one of two options. For example: + +* `\http://s3.amazonaws.com/[bucket_name]/` +* `http://[bucket_name].s3.amazonaws.com/` + +To resolve this issue, you must add the `s3_region` configuration parameter to your `config.yaml` file. This field is not currently embedded in the {productname} config editor, so it must be manually added. If this field is not present in your `config.yaml` file, the Authorization header explicitly mentions a different region and not the region set in the `hostname` field of your `config.yaml` file. + +The following `config.yaml` excerpt is the correct configuration when using AWS: + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - S3Storage + - host: s3.ap-south-1.amazonaws.com + s3_access_key: ***************** + s3_bucket: quay-bucket-1 + s3_secret_key: ********************************* + storage_path: /storage/quayregistrystorage + s3_region: ap-south-1 +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6993082[Quay Errors out while connecting to AWS S3 Bucket]. \ No newline at end of file diff --git a/modules/connection-issues-clair-quay-db.adoc b/modules/connection-issues-clair-quay-db.adoc new file mode 100644 index 000000000..85df3f599 --- /dev/null +++ b/modules/connection-issues-clair-quay-db.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="connection-issues-clair-quay-db"] += Clair and Quay database connection issues + +When attempting to connect to an external PostgreSQL database for {productname} and Clair, attempting to connect to the "public" schema might not be allowed. The public schema is only accessible by PostgreSQL administrators. Use the following procedure to troubleshoot connection issues. + +.Procedure + +. If you are an individual user accessing separate databases, use the following steps. + +.. 
Specify a `search_path`, for example, `\https://postgresqlco.nf/doc/en/param/search_path/` to the `Quay` and `Clair` database for the service account or user. + +... Enter the following command on the `Quay` database: ++ +[source,terminal] +---- +ALTER ROLE "role_name" SET search_path TO quay_username; +---- + +.. Enter the following command on the `Clair` database: ++ +[source,terminal] +---- +ALTER ROLE "role_name" SET search_path TO clair_username; +---- ++ +[NOTE] +==== +This will not allow one user to access the `Quay` and `Clair` database at the same time. +==== + +.. Optional. The `quay-app` pod requires the database to be created in advance so that the `quay-app-upgrade` pod assists in setting it up. As a result, schemas and tables are already populated. This causes an issue with the `search_path` setting for {productname}. To resolve this issue, add a `search_path` in your {productname} and Clair `config.yaml` files. + +... Add the following line to your {productname} `config.yaml` file: ++ +[source,yaml] +---- +DB_URI: postgresql://:@quayhostname.example.com:/quay_username?options=-csearch_path%3Dquay_username +---- + +... Reset the `Quay` database by cleaning it up and restarting the `quay-upgrade-app-pod`. + +... Configure the Clair connstring to use a `search_path` by adding the following line to your Clair `config.yaml` file: ++ +[source,yaml] +---- +indexer: +connstring: host= port=5402 dbname=db_name user= password= sslmode=disable options=--search_path=clair_username +---- + +. In some cases, you might have a single service account or be a user that can access both databases. A database contains one or more named schemas, which in turn contains tables. Unlike databases, schemas are not rigidly separated; that is, a user can access objects in any of the schemas in the database that they are connected to if they have proper privileges. ++ +In this case, you must ensure that the tables of your {productname} and Clair are part of the same schema. 
Otherwise, unqualified queries are unable to find the tables. The queries from {productname} and Clair are all unqualified, as they expect the tables to be accessible from the database connection by default. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/7004240[Issue connecting to external Clair/Quay databases]. \ No newline at end of file diff --git a/modules/content-distrib-intro.adoc b/modules/content-distrib-intro.adoc index d3a8e6a95..e6242a880 100644 --- a/modules/content-distrib-intro.adoc +++ b/modules/content-distrib-intro.adoc @@ -6,6 +6,6 @@ Content distribution features in {productname} include: * xref:arch-mirroring-intro[Repository mirroring] -* xref:arch-georepl-intro[Geo-replication] +* xref:georepl-intro[Geo-replication] * xref:arch-airgap-intro[Deployment in air-gapped environments] diff --git a/modules/core-prereqs-db.adoc b/modules/core-prereqs-db.adoc index 7189368e9..d77c38263 100644 --- a/modules/core-prereqs-db.adoc +++ b/modules/core-prereqs-db.adoc @@ -2,10 +2,10 @@ [id="arch-core-prereqs-db"] = Database backend -{productname} stores most of its configuration and all metadata and logs inside its database backend, although logs can be pushed to ElasticSearch if required. PostgreSQL is the preferred database backend because it can be used for both {productname} and Clair. +{productname} stores all of its configuration information in the `config.yaml` file. Registry metadata, for example, user information, robot accounts, team, permissions, organizations, images, tags, manifests, etc. are stored inside of the database backend. Logs can be pushed to ElasticSearch if required. PostgreSQL is the preferred database backend because it can be used for both {productname} and Clair. -A future version of {productname} will remove support for using MySQL and MariaDB as the database backend, which has been deprecated since the {productname} 3.6 release. 
Until then, MySQL is still supported according to the link:https://access.redhat.com/articles/4067991[support matrix], but will not receive additional features or explicit testing coverage. The {productname} Operator only supports PostgreSQL as a managed database because {productname} 3.4. External MySQL and MariaDB databases can still be leveraged by setting the database to `unmanaged` in the Operator, until support is removed. +A future version of {productname} will remove support for using MySQL and MariaDB as the database backend, which has been deprecated since the {productname} 3.6 release. Until then, MySQL is still supported according to the link:https://access.redhat.com/articles/4067991[support matrix], but will not receive additional features or explicit testing coverage. The {productname} Operator supports only PostgreSQL deployments when the database is managed. If you want to use MySQL, you must deploy it manually and set the database component to `managed: false`. -Deploying {productname} in a highly available (HA) configuration requires that your database is provisioned for high availability. If {productname} is running on public cloud infrastructure, it is recommended that you use the PostgreSQL services provided by your cloud provider. +Deploying {productname} in a highly available (HA) configuration requires that your database services are provisioned for high availability. If {productname} is running on public cloud infrastructure, it is recommended that you use the PostgreSQL services provided by your cloud provider, however MySQL is also supported. Geo-replication requires a single, shared database that is accessible from all regions. diff --git a/modules/core-prereqs-redis.adoc b/modules/core-prereqs-redis.adoc index beb7e4568..ab24591aa 100644 --- a/modules/core-prereqs-redis.adoc +++ b/modules/core-prereqs-redis.adoc @@ -2,6 +2,8 @@ [id="core-prereqs-redis"] = Redis -{productname} stores builder logs inside a Redis cache. 
Because the data stored is ephemeral, Redis does not need to be highly available even though it is stateful. If Redis does fail, you will only lose access to build logs and builders. Additionally, user events will be unavailable. +{productname} stores builder logs inside a Redis cache. Because the data stored is ephemeral, Redis does not need to be highly available even though it is stateful. + +If Redis fails, you will lose access to build logs, builders, and the garbage collector service. Additionally, user events will be unavailable. You can use a Redis image from the Red Hat Software Collections or from any other source you prefer. diff --git a/modules/core-prereqs-storage.adoc b/modules/core-prereqs-storage.adoc index 97713f99b..bcfba3718 100644 --- a/modules/core-prereqs-storage.adoc +++ b/modules/core-prereqs-storage.adoc @@ -9,9 +9,9 @@ Local storage:: {productname} can work with local storage, however this should o HA storage setup:: For a {productname} HA deployment, you must provide HA image storage, for example: + - **Red Hat OpenShift Data Foundation**, previously known as Red Hat OpenShift Container Storage, is software-defined storage for containers. Engineered as the data and storage services platform for {ocp}, Red Hat OpenShift Data Foundation helps teams develop and deploy applications quickly and efficiently across clouds. More information can be found at link:https://www.redhat.com/en/technologies/cloud-computing/openshift-data-foundation[]. -- **Ceph Object Gateway** (also called RADOS Gateway) is an example of a storage solution that can provide the the object storage needed by {productname}. -Detailed instructions on how to use Ceph storage as a highly available storage backend can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_-_high_availability/preparing_for_red_hat_quay_high_availability#set_up_ceph[Quay High Availability Guide]. 
-Further information about Red Hat Ceph Storage and HA setups can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/pdf/architecture_guide/Red_Hat_Ceph_Storage-3-Architecture_Guide-en-US.pdf[Red Hat Ceph Storage Architecture Guide] +- **Ceph Object Gateway** (also called RADOS Gateway) is an example of a storage solution that can provide the object storage needed by {productname}. +Detailed instructions on how to use Ceph storage as a highly available storage backend can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_-_high_availability/preparing_for_red_hat_quay_high_availability#set_up_ceph[Quay High Availability Guide]. +Further information about Red Hat Ceph Storage and HA setups can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/pdf/architecture_guide/Red_Hat_Ceph_Storage-3-Architecture_Guide-en-US.pdf[Red Hat Ceph Storage Architecture Guide] Geo-replication:: Local storage cannot be used for geo-replication, so a supported on premise or cloud based object storage solution must be deployed. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the {productname} instance, and will then be replicated, in the background, to the other storage engines. This requires the image storage to be accessible from all regions. 
@@ -29,3 +29,4 @@ Geo-replication:: Local storage cannot be used for geo-replication, so a support * Amazon Web Services (AWS) S3 * Google Cloud Storage * Azure Blob Storage +* Hitachi Content Platform (HCP) diff --git a/modules/cosign-oci-intro.adoc b/modules/cosign-oci-intro.adoc index 920b15af9..fa6ce0e2e 100644 --- a/modules/cosign-oci-intro.adoc +++ b/modules/cosign-oci-intro.adoc @@ -1,7 +1,8 @@ -[[cosign-oci-intro]] -= Cosign OCI support with {productname} +:_content-type: CONCEPT +[id="cosign-oci-intro"] += Cosign OCI support -Cosign is a tool that can be used to sign and verify container images. It uses the ECDSA-P256 signature algorithm and Red Hat's Simple Signing payload format to create public keys that are stored in PKIX files. Private keys are stored as encrypted PEM files. +Cosign is a tool that can be used to sign and verify container images. It uses the `ECDSA-P256` signature algorithm and Red Hat's Simple Signing payload format to create public keys that are stored in PKIX files. Private keys are stored as encrypted PEM files. Cosign currently supports the following: @@ -9,3 +10,106 @@ Cosign currently supports the following: * Bring-your-own PKI * OIDC PKI * Built-in binary transparency and timestamping service + +Use the following procedure to directly install Cosign. + +.Prerequisites + +* You have installed Go version 1.16 or later. +ifeval::["{context}" == "use-quay"] +* You have set `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file. +endif::[] + +.Procedure + +. Enter the following `go` command to directly install Cosign: ++ +[source,terminal] +---- +$ go install github.com/sigstore/cosign/cmd/cosign@v1.0.0 +---- ++ +.Example output ++ +[source,terminal] +---- +go: downloading github.com/sigstore/cosign v1.0.0 +go: downloading github.com/peterbourgon/ff/v3 v3.1.0 +---- + +. 
+If you experience the `error: signing quay-server.example.com/user1/busybox:test: getting remote image: GET https://quay-server.example.com/v2/user1/busybox/manifests/test: UNAUTHORIZED: access to the requested resource is not authorized; map[]` error, which occurs because Cosign relies on `~/.docker/config.json` for authorization, you might need to execute the following command:
Enter the following command to see the updated authorization configuration: ++ +[source,terminal] +---- +$ cat ~/.docker/config.json +{ + "auths": { + "quay-server.example.com": { + "auth": "cXVheWFkbWluOnBhc3N3b3Jk" + } + } +---- \ No newline at end of file diff --git a/modules/cosign-oci-with-quay.adoc b/modules/cosign-oci-with-quay.adoc index ccb2872d8..a172a5fcb 100644 --- a/modules/cosign-oci-with-quay.adoc +++ b/modules/cosign-oci-with-quay.adoc @@ -1,50 +1,99 @@ -[[cosign-oci-with-quay]] -= Using cosign with quay +:_content-type: CONCEPT +[id="cosign-oci-with-quay"] += Installing and using Cosign -If you have Go 1.16+, you can directly install cosign with the following command: +Use the following procedure to directly install Cosign. -.... +.Prerequisites + +* You have installed Go version 1.16 or later. +* You have set `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file. + +.Procedure + +. Enter the following `go` command to directly install Cosign: ++ +[source,terminal] +---- $ go install github.com/sigstore/cosign/cmd/cosign@v1.0.0 +---- ++ +.Example output ++ +[source,terminal] +---- go: downloading github.com/sigstore/cosign v1.0.0 go: downloading github.com/peterbourgon/ff/v3 v3.1.0 -... -.... +---- -Next, generate a keypair: - -.... +. Generate a key-value pair for Cosign by entering the following command: ++ +[source,terminal] +---- $ cosign generate-key-pair +---- ++ +.Example output ++ +[source,terminal] +---- Enter password for private key: Enter again: Private key written to cosign.key Public key written to cosign.pub -.... - -Sign the keypair with the following command: +---- -.... +. 
+If you experience the `error: signing quay-server.example.com/user1/busybox:test: getting remote image: GET https://quay-server.example.com/v2/user1/busybox/manifests/test: UNAUTHORIZED: access to the requested resource is not authorized; map[]` error, which occurs because Cosign relies on `~/.docker/config.json` for authorization, you might need to execute the following command:
+When you create a team for your organization with the API, you can select the team name,
Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationteam[`PUT /api/v1/organization/{orgname}/team/{teamname}`] command to create a team for your organization:
Team members have no permission to the repository. +* *Read*. Team members can view and pull from the repository. +* *Write*. Team members can read (pull) from and write (push) to the repository. +* *Admin*. Full access to pull from, and push to, the repository, plus the ability to do administrative tasks associated with the repository. + +. Optional. Add a team member or robot account. To add a team member, enter the name of their {productname} account. + +. Review and finish the information, then click *Review and Finish*. The new team appears under the *Teams and membership page*. \ No newline at end of file diff --git a/modules/creating-an-image-repository-via-docker.adoc b/modules/creating-an-image-repository-via-docker.adoc new file mode 100644 index 000000000..2742697f1 --- /dev/null +++ b/modules/creating-an-image-repository-via-docker.adoc @@ -0,0 +1,131 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT + +[id="creating-an-image-repository-via-docker"] += Creating a repository by using Podman + +With the proper credentials, you can _push_ an image to a repository using Podman that does not yet exist in your +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +instance. Pushing an image refers to the process of uploading a container image from your local system or development environment to a container registry like +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +After pushing an image to your registry, a repository is created. If you push an image through the command-line interface (CLI) without first creating a repository on the UI, the created repository is set to *Private*. 
+* You have downloaded and installed the `podman` CLI.
For example: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay.io/quayadmin/busybox:test +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ sudo podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- +endif::[] + +. Push the image to the registry. Following this step, you can use your browser to see the tagged image in your repository. ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman push --tls-verify=false quay.io/quayadmin/busybox:test +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- +endif::[] ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 6b245f040973 done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +---- \ No newline at end of file diff --git a/modules/creating-an-image-repository-via-skopeo-copy.adoc b/modules/creating-an-image-repository-via-skopeo-copy.adoc new file mode 100644 index 000000000..89c6747c9 --- /dev/null +++ b/modules/creating-an-image-repository-via-skopeo-copy.adoc @@ -0,0 +1,72 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT + +[id="creating-an-image-repository-via-skopeo-copy"] += Creating a repository by using Skopeo + +In some cases, the `podman` CLI tool is unable to pull certain artifact types, for example, `application/x-mlmodel`. Attempting to use `podman pull` with this artifact type results in the following error: + +[source,terminal] +---- +Error: parsing image configuration: unsupported image-specific operation on artifact with type "application/x-mlmodel" +---- + +As an alternative, you can use `skopeo copy` to copy an artifact from one location to your {productname} repository. 
+ +.Prerequisites + +* You have installed the `skopeo` CLI. +* You have logged in to a source registry (in this example, `\registry.redhat.io`) and have a valid authentication file (`~/.docker/config.json`). Alternatively, you can provide credentials by using the `--src-username` and `--src-password` parameters when running a command with the `skopeo` CLI. +* You have logged in to your {productname} repository. + +.Procedure + +* Use the `skopeo copy` command on an artifact to copy the artifact to your {productname} repository. For example: ++ +[source,terminal] +---- +$ sudo skopeo copy --dest-tls-verify=false --all \ <1> <2> + --src-username --src-password \ <3> + --src-authfile ~/.docker/config.json \ <4> + --dest-username --dest-password \ <5> + docker://registry.redhat.io/rhelai1/granite-8b-code-instruct:1.4-1739210683 \ <6> + docker://quay-server.example.com//granite-8b-code-instruct:latest <7> +---- +<1> Optional. `--dest-tls-verify=false` disables SSL/TLS verification for the destination registry. +<2> Optional. The `--all` flag optionally copies all image manifests, including multi-architecture images. +<3> Optional. If you are not logged into a registry, you can pass in the source registry credentials with these parameters. +<4> Optional. The path to your Docker authentication file. Typically located at `~/.docker/config.json`. +<5> Your {productname} registry username and password. +<6> The source image or artifact from the Red{nbsp}Hat container registry. Ensure that you are logged in to the registry and that you can pull the image. +<7> The URL of your {productname} repository appended with a namespace and the name of the image. 
++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Checking if image destination supports signatures +Copying blob 9538fa2b8ad9 done | +Copying blob 491ae95f59a2 done | +Copying blob 01196d075d77 done | +Copying blob e53a4633c992 done | +Copying blob c266e9cfa731 done | +Copying blob dae0e701d9b2 done | +Copying blob 1e227a2c78d8 done | +Copying blob 94ff9338861b done | +Copying blob 2f2bba45146f done | +Copying blob d3b4df07a0ce done | +Copying blob f533a8dbb852 done | +Copying config 44136fa355 done | +Writing manifest to image destination +Storing signatures +---- + +.Next steps + +* After you have pushed a machine learning artifact to your {productname} repository, you can link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-and-modifying-tags[View tag information by using the UI] or link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[View model card information by using the UI]. \ No newline at end of file diff --git a/modules/creating-an-image-repository-via-the-api.adoc b/modules/creating-an-image-repository-via-the-api.adoc new file mode 100644 index 000000000..a7421355d --- /dev/null +++ b/modules/creating-an-image-repository-via-the-api.adoc @@ -0,0 +1,43 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="creating-an-image-repository-via-the-api"] += Creating a repository by using the API + +ifeval::["{context}" == "quay-security"] +Private repositories can be created by using the API and specifying the the `"visibility": `private`` option. +endif::[] + +Use the following procedure to create an image repository using the {productname} API. 
+Private repositories can be created by using the API and specifying the `"visibility": "private"` option.
+When these words are used for repository names, users are unable to access the repository, and are unable to permanently delete the repository.
Create the resource by entering the following command: ++ +[source,terminal] +---- +$ oc -n create secret generic custom-ssl-config-bundle-secret \ + --from-file=config.yaml= \ <1> + --from-file=ssl.cert= \ <2> + --from-file=extra_ca_cert_.crt=ca-certificate-bundle.crt + \ <3> + --from-file=ssl.key= \ <4> + --dry-run=client -o yaml > custom-ssl-config-bundle-secret.yaml +---- +<1> Where `` is your `base64 decoded` `config.yaml` file. +<2> Where `` is your `ssl.cert` file. +<3> Optional. The `--from-file=extra_ca_cert_.crt=ca-certificate-bundle.crt` field allows {productname} to recognize custom Certificate Authority (CA) files. If you are using LDAP, OIDC, or another service that uses custom CAs, you must add them via the `extra_ca_cert` path. For more information, see "Adding additional Certificate Authorities to {productname-ocp}." +<4> Where `` is your `ssl.key` file. + +. Optional. You can check the content of the `custom-ssl-config-bundle-secret.yaml` file by entering the following command: ++ +[source,terminal] +---- +$ cat custom-ssl-config-bundle-secret.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +apiVersion: v1 +data: + config.yaml: QUxMT1dfUFVMTFNfV0lUSE9VVF9TVFJJQ1RfTE9HR0lORzogZmFsc2UKQVVUSEVOVElDQVRJT05fVFlQRTogRGF0YWJhc2UKREVGQVVMVF9UQUdfRVhQSVJBVElPTjogMncKRElTVFJJQlVURURfU1R... + ssl.cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVYakNDQTBhZ0F3SUJBZ0lVTUFBRk1YVWlWVHNoMGxNTWI3U1l0eFV5eTJjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2dZZ3hDekFKQmdOVkJBWVR... + extra_ca_cert_:LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVYakNDQTBhZ0F3SUJBZ0lVTUFBRk1YVWlWVHNoMGxNTWI3U1l0eFV5eTJjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2dZZ3hDe... + ssl.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQ2c0VWxZOVV1SVJPY1oKcFhpZk9MVEdqaS9neUxQMlpiMXQ... +kind: Secret +metadata: + creationTimestamp: null + name: custom-ssl-config-bundle-secret + namespace: +---- + +. 
+. Ensure that your `QuayRegistry` YAML file has been updated to use the custom SSL `configBundleSecret` resource, and that your `tls` resource is set to `false`, by entering the following command:
Click the menu kebab for your registry, then click *Edit Route*. + +.. For *Hostname*, include the URL of your {productname} registry. + +.. For *Service*, select *<_registry_quay_app*. + +.. For *Target port*, select *443 -> 8443 (TCP)*. + +.. For *TLS termination* select *Passthrough*. + +.. For *Insecure traffic* select *Redirect*. Then, click *Save*. + +. Monitor your {productname} registry deployment: + +.. On the {ocp} web console click *Operators* -> *Installed Operators* -> *{productname}*. + +.. Click *Quay Registry*, then click the name of your registry. + +.. Click *Events* to monitor the progress of the reconfiguration. If necessary, you can restart all pods by deleting them. For example: ++ +[source,terminal] +---- +$ oc get pods -n | grep quay +---- ++ +.Example output ++ +[source,terminal] +---- +example-registry-quay-app-6c5bc8ffb7-4qr5v 1/1 Running 0 18m +example-registry-quay-app-6c5bc8ffb7-xwswd 1/1 Running 0 20m +example-registry-quay-database-5f64c9db49-bmg9v 1/1 Running 0 156m +example-registry-quay-mirror-797458dcc7-ktw9v 1/1 Running 0 19m +example-registry-quay-mirror-797458dcc7-tmcxd 1/1 Running 0 19m +example-registry-quay-redis-5f6b6cc597-rltc5 1/1 Running 0 20m +quay-operator.v3.12.1-5b7dbd57df-xrs87 1/1 Running 0 24h +---- ++ +[source,terminal] +---- +$ oc delete pods -n example-registry-quay-app-6c5bc8ffb7-4qr5v example-registry-quay-app-6c5bc8ffb7-xwswd example-registry-quay-database-5f64c9db49-bmg9v example-registry-quay-mirror-797458dcc7-ktw9v example-registry-quay-mirror-797458dcc7-tmcxd example-registry-quay-redis-5f6b6cc597-rltc5 quay-operator.v3.12.1-5b7dbd57df-xrs87 +---- + + +... Create an SSL/TLS bundle by concatenating the SSL certificate and the CA certificate. For example: ++ +[source,terminal] +---- +$ cat ssl.cert ca.cert > ssl-bundle.cert +---- ++ +[NOTE] +==== +Depending on your needs, you might also include `intermediateCA.pem` CAs, `rootCA.pem` CAs, or other CAs into the `ssl-bundle.cert` as necessary. 
Do not include private keys in your configuration bundle. +==== + +. If not already set, update your `config.yaml` file to include the `PREFERRED_URL_SCHEME: https`, `EXTERNAL_TLS_TERMINATION: false`, and `SERVER_HOSTNAME` fields: ++ +[source,yaml] +---- +PREFERRED_URL_SCHEME: https +EXTERNAL_TLS_TERMINATION: false +SERVER_HOSTNAME: +---- +//// + +.Verification + +* Confirm a TLS connection to the server and port by entering the following command: ++ +[source,terminal] +---- +$ openssl s_client -connect :443 +---- ++ +.Example output ++ +[source,terminal] +---- +# ... +SSL-Session: + Protocol : TLSv1.3 + Cipher : TLS_AES_256_GCM_SHA384 + Session-ID: 0E995850DC3A8EB1A838E2FF06CE56DBA81BD8443E7FA05895FBD6FBDE9FE737 + Session-ID-ctx: + Resumption PSK: 1EA68F33C65A0F0FA2655BF9C1FE906152C6E3FEEE3AEB6B1B99BA7C41F06077989352C58E07CD2FBDC363FA8A542975 + PSK identity: None + PSK identity hint: None + SRP username: None + TLS session ticket lifetime hint: 7200 (seconds) + +# ... +---- + diff --git a/modules/creating-image-expiration-notification.adoc b/modules/creating-image-expiration-notification.adoc new file mode 100644 index 000000000..195df1b3b --- /dev/null +++ b/modules/creating-image-expiration-notification.adoc @@ -0,0 +1,83 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="creating-image-expiration-notification"] += Creating an image expiration notification + +Image expiration event triggers can be configured to notify users through email, Slack, webhooks, and so on, and can be configured at the repository level. Triggers can be set for images expiring in any amount of days, and can work in conjunction with the auto-pruning feature. + +Image expiration notifications can be set by using the {productname} v2 UI or by using the `createRepoNotification` API endpoint. + +.Prerequisites + +* `FEATURE_GARBAGE_COLLECTION: true` is set in your `config.yaml` file. +* Optional. 
+. Optional. You can set the `NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES` variable in your `config.yaml` file. With this field set, if there are any expiring images, notifications are sent automatically. By default, this is set to `300`, or 5 hours; however, it can be adjusted as warranted.
The information sent should look similar to the following example: ++ +[source,json] +---- +{ + "repository": "sample_org/busybox", + "namespace": "sample_org", + "name": "busybox", + "docker_url": "quay-server.example.com/sample_org/busybox", + "homepage": "http://quay-server.example.com/repository/sample_org/busybox", + "tags": [ + "latest", + "v1" + ], + "expiring_in": "1 days" +} +---- \ No newline at end of file diff --git a/modules/creating-notifications-api.adoc b/modules/creating-notifications-api.adoc new file mode 100644 index 000000000..994ebf296 --- /dev/null +++ b/modules/creating-notifications-api.adoc @@ -0,0 +1,94 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="managing-notifications-api"] += Creating notifications by using the API + +Use the following procedure to add notifications. + +.Prerequisites + +* You have created a repository. +* You have administrative privileges for the repository. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createreponotification[`POST /api/v1/repository/{repository}/notification`] command to create a notification on your repository: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "event": "", + "method": "", + "config": { + "": "" + }, + "eventConfig": { + "": "" + } + }' \ + https:///api/v1/repository///notification/ +---- ++ +This command does not return output in the CLI. 
Instead, you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getreponotification[`GET /api/v1/repository/{repository}/notification/{uuid}`] command to obtain information about the repository notification: ++ +[source,terminal] +---- +{"uuid": "240662ea-597b-499d-98bb-2b57e73408d6", "title": null, "event": "repo_push", "method": "quay_notification", "config": {"target": {"name": "quayadmin", "kind": "user", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}}}, "event_config": {}, "number_of_failures": 0} +---- + +. You can test your repository notification by entering the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#testreponotification[`POST /api/v1/repository/{repository}/notification/{uuid}/test`] command: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification//test +---- ++ +.Example output ++ +[source,terminal] +---- +{} +---- + +. You can reset repository notification failures to 0 by entering the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#resetrepositorynotificationfailures[`POST /api/v1/repository/{repository}/notification/{uuid}`] command: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification/ +---- + +. 
Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletereponotification[`DELETE /api/v1/repository/{repository}/notification/{uuid}`] command to delete a repository notification: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository///notification/ +---- ++ +This command does not return output in the CLI. Instead, you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listreponotifications[`GET /api/v1/repository/{repository}/notification/`] command to retrieve a list of all notifications: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" https:///api/v1/repository///notification +---- ++ +.Example output ++ +[source,terminal] +---- +{"notifications": []} +---- \ No newline at end of file diff --git a/modules/creating-notifications.adoc b/modules/creating-notifications.adoc new file mode 100644 index 000000000..d5cabff12 --- /dev/null +++ b/modules/creating-notifications.adoc @@ -0,0 +1,51 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="creating-notifications"] += Creating notifications by using the UI + +Use the following procedure to add notifications. + +.Prerequisites + +* You have created a repository. +* You have administrative privileges for the repository. + +.Procedure + +ifeval::["{context}" == "quay-io"] +. Navigate to a repository on {quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +. Navigate to a repository on {productname}. +endif::[] + +. In the navigation pane, click *Settings*. + +. In the *Events and Notifications* category, click *Create Notification* to add a new notification for a repository event. The *Create notification* popup box appears. + +. 
On the *Create notification* popup box, click the *When this event occurs* box to select an event. You can select a notification for the following types of events: ++ +* Push to Repository +* Image build failed +* Image build queued +* Image build started +* Image build success +* Image build cancelled +* Image expiry trigger + +. After you have selected the event type, select the notification method. The following methods are supported: ++ +* Quay Notification +* E-mail Notification +* Webhook POST +* Flowdock Team Notification +* HipChat Room Notification +* Slack Notification ++ +Depending on the method that you choose, you must include additional information. For example, if you select *E-mail*, you are required to include an e-mail address and an optional notification title. + +. After selecting an event and notification method, click *Create Notification*. \ No newline at end of file diff --git a/modules/creating-oauth-access-token.adoc b/modules/creating-oauth-access-token.adoc new file mode 100644 index 000000000..9a5ad10c1 --- /dev/null +++ b/modules/creating-oauth-access-token.adoc @@ -0,0 +1,51 @@ +:_content-type: PROCEDURE +[id="creating-oauth-access-token"] += Creating an OAuth 2 access token + +With {productname}, you must create an OAuth 2 access token before you can access the API endpoints of your organization. An OAuth 2 access token can only be generated by using the {productname} UI; the CLI cannot be used to generate an OAuth 2 access token. + +Use the following procedure to create an OAuth 2 access token. + +.Prerequisites + +* You have logged in to {productname} as an administrator. +* You have created an OAuth 2 application. + +.Procedure + +. On the main page, select an Organization. + +. In the navigation pane, select *Applications*. + +. Click the name of your application, for example, *Test application*. + +. In the navigation pane, select *Generate Token*. + +. Check the boxes for the following options: + +.. *Administer Organization*. 
When selected, allows the user to administer organizations, including creating robots, creating teams, adjusting team membership, and changing billing settings. + +.. *Administer Repositories*. When selected, provides the user administrator access to all repositories to which the granting user has access. + +.. *Create Repositories*. When selected, provides the user the ability to create repositories in any namespace in which the granting user is allowed to create repositories. + +.. *View all visible repositories*. When selected, provides the user the ability to view and pull all repositories visible to the granting user. + +.. *Read/Write to any accessible repositories*. When selected, provides the user the ability to view, push and pull to all repositories to which the granting user has write access. + +.. *Super User Access*. When selected, provides the user the ability to administer your installation including managing users, managing organizations and other features found in the superuser panel. + +.. *Administer User*. When selected, provides the user the ability to administer your account including creating robots and granting them permissions to your repositories. + +.. *Read User Information*. When selected, provides the user the ability to read user information such as username and email address. + +. Click *Generate Access Token*. You are redirected to a new page. + +. Review the permissions that you are allowing, then click *Authorize Application*. Confirm your decision by clicking *Authorize Application*. + +. You are redirected to the *Access Token* page. Copy and save the access token. ++ +[IMPORTANT] +==== +This is the only opportunity to copy and save the access token. It cannot be reobtained after leaving this page. 
+==== \ No newline at end of file diff --git a/modules/creating-oauth-application-api.adoc b/modules/creating-oauth-application-api.adoc new file mode 100644 index 000000000..33cfb207c --- /dev/null +++ b/modules/creating-oauth-application-api.adoc @@ -0,0 +1,79 @@ +:_content-type: PROCEDURE +[id="creating-oauth-application-api"] += Managing a user application by using the API + +{productname} users can create, list information about, and delete a _user application_ that can be used as an alternative to using your password for Docker, Podman, or other service providers. User application tokens work like your username and password, but are encrypted and do not provide any information to third parties regarding who is accessing {productname}. + +[NOTE] +==== +After creation via the CLI, the user application token is listed under *User Settings* of the {productname} UI. Note that this differs from an application token that is created under user settings, and should be considered a different application entirely. +==== + +Use the following procedure to create a user application token. + +.Prerequisites + +* You have created an OAuth 2 access token. 
+ +.Procedure + +* Create a user application by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#appspecifictokens[`POST /api/v1/user/apptoken`] API call: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "title": "MyAppToken" + }' \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- ++ +.Example output ++ +[source,terminal] +---- +{"token": {"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null, "token_code": "K2YQB1YO0ABYV5OBUYOMF9MCUABN12Y608Q9RHFXBI8K7IE8TYCI4WEEXSVH1AXWKZCKGUVA57PSA8N48PWED9F27PXATFUVUD9QDNCE9GOT9Q8ACYPIN0HL"}} +---- + +* You can obtain information about your application, including when the application expires, by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listapptokens[`GET /api/v1/user/apptoken`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- ++ +[source,terminal] +---- +{"tokens": [{"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null}], "only_expiring": null} +---- + +* You can obtain information about a specific user application by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getapptoken[`GET /api/v1/user/apptoken/{token_uuid}`] command: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"token": {"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null, "token_code": "K2YQB1YO0ABYV5OBUYOMF9MCUABN12Y608Q9RHFXBI8K7IE8TYCI4WEEXSVH1AXWKZCKGUVA57PSA8N48PWED9F27PXATFUVUD9QDNCE9GOT9Q8ACYPIN0HL"}} +---- + +* You can delete or revoke a user application token by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#revokeapptoken[`DELETE /api/v1/user/apptoken/{token_uuid}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- ++ +This command does not return output in the CLI. You can return a list of tokens by entering one of the aforementioned commands. 
\ No newline at end of file diff --git a/modules/creating-oauth-application.adoc b/modules/creating-oauth-application.adoc new file mode 100644 index 000000000..c98e6934c --- /dev/null +++ b/modules/creating-oauth-application.adoc @@ -0,0 +1,34 @@ +:_content-type: PROCEDURE +[id="creating-oauth-application"] += Creating an OAuth 2 application by using the UI + +{productname} administrators can define an application by specifying a unique name, a homepage URL, a description of the application's uses, an e-mail, or a redirect/callback URL. + +[NOTE] +==== +The following application token is created under an Organization. This differs from an application token that is created under user settings, and should be considered a different application entirely. +==== + +Use the following procedure to create an OAuth2 application. + +.Prerequisites + +* You have logged in to {productname} as an administrator. + +.Procedure + +. On the main page, select an Organization. + +. In the navigation pane, select *Applications*. + +. Click *Create New Application* and provide a new application name, then press *Enter*. + +. On the *OAuth Applications* page, select the name of your application. + +. Optional. Enter the following information: + +.. *Application Name* +.. *Homepage URL* +.. *Description* +.. *Avatar E-mail* +.. *Redirect/Callback URL prefix* \ No newline at end of file diff --git a/modules/creating-org-policy-api.adoc b/modules/creating-org-policy-api.adoc new file mode 100644 index 000000000..f89995abe --- /dev/null +++ b/modules/creating-org-policy-api.adoc @@ -0,0 +1,92 @@ +[id="creating-org-policy-api"] += Creating an auto-prune policy for a namespace by using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for an namespace. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. 
+ +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationautoprunepolicy[`POST /api/v1/organization/{orgname}/autoprunepolicy/`] command to create a new policy that limits the number of tags allowed in an organization: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +Alternatively, you can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ +"method": "creation_date", "value": "7d"}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output +[source,terminal] +---- +{"uuid": "73d64f05-d587-42d9-af6d-e726a4a80d6e"} +---- + +. Optional. You can add an additional policy to an organization and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/organization//autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `true` makes it so that tags that match the given regex pattern will be pruned. In this example, tags that match `^v*` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0"} +---- + +. You can update your organization's auto-prune policy by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationautoprunepolicy[`PUT /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid}`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' "/api/v1/organization//autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0", "method": "creation_date", "value": "4d", "tagPattern": "^v*", "tagPatternMatches": true}, {"uuid": "da4d0ad7-3c2d-4be8-af63-9c51f9a501bc", "method": "number_of_tags", "value": 10, "tagPattern": null, "tagPatternMatches": true}, {"uuid": "17b9fd96-1537-4462-a830-7f53b43f94c2", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true}]} +---- + +. You can delete the auto-prune policy for your organization by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/73d64f05-d587-42d9-af6d-e726a4a80d6e +---- \ No newline at end of file diff --git a/modules/creating-policy-api-current-user.adoc b/modules/creating-policy-api-current-user.adoc new file mode 100644 index 000000000..1407aa297 --- /dev/null +++ b/modules/creating-policy-api-current-user.adoc @@ -0,0 +1,66 @@ +[id="creating-policy-api-current-user"] += Creating an auto-prune policy for a namespace for the current user by using the API + +You can use {productname} API endpoints to manage auto-pruning policies for your account. + +[NOTE] +==== +The use of `/user/` in the following commands represents the user that is currently logged into {productname}. 
+==== + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following `POST` command create a new policy that limits the number of tags for the current user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/user/autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. 
++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- diff --git a/modules/creating-policy-api-other-user.adoc b/modules/creating-policy-api-other-user.adoc new file mode 100644 index 000000000..c92c9d98d --- /dev/null +++ b/modules/creating-policy-api-other-user.adoc @@ -0,0 +1,100 @@ + +[id="creating-policy-api-other-user"] += Creating an auto-prune policy on a repository for a user with the API + +You can use {productname} API endpoints to manage auto-pruning policies on a repository for user accounts that are not your own, so long as you have `admin` privileges on the repository. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. +* You have `admin` privileges on the repository that you are creating the policy for. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserautoprunepolicy[`POST /api/v1/repository///autoprunepolicy/`] command create a new policy that limits the number of tags for the user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' https:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- + +. Optional. You can add an additional policy for the current user and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. 
For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' \ + "http:///api/v1/repository///autoprunepolicy/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b3797bcd-de72-4b71-9b1e-726dabc971be"} +---- + +. You can update your policy for the current user by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateuserautoprunepolicy[`PUT /api/v1/repository///autoprunepolicy/`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^test.", + "tagPatternMatches": true + }' "https:///api/v1/repository///autoprunepolicy/" +---- ++ +Updating a policy does not return output in the CLI. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/7726f79c-cbc7-490e-98dd-becdc6fefce7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "81ee77ec-496a-4a0a-9241-eca49437d15b", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. 
++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- diff --git a/modules/creating-repository-policy-api.adoc b/modules/creating-repository-policy-api.adoc new file mode 100644 index 000000000..b0a7d64fa --- /dev/null +++ b/modules/creating-repository-policy-api.adoc @@ -0,0 +1,110 @@ +[id="creating-repository-policy-api"] += Creating an auto-prune policy for a repository using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for a repository. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepositoryautoprunepolicy[`POST /api/v1/repository/{repository}/autoprunepolicy/`] command to create a new policy that limits the number of tags allowed in a repository: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "creation_date", "value": "7d"}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- + +. Optional. You can add an additional policy and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. 
For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "", + "value": "<7d>", + "tagPattern": "<^test.>*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/repository///autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `false` makes it so that all tags that _do not_ match the given regex pattern are pruned. In this example, all tags _except_ those matching `^test.` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b53d8d3f-2e73-40e7-96ff-736d372cd5ef"} +---- + +. You can update your policy for the repository by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepositoryautoprunepolicy[`PUT /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid}`] command and passing in the UUID. For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "5", + "tagPattern": "^test.*", + "tagPatternMatches": true + }' \ + "https://quay-server.example.com/api/v1/repository///autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step to check your auto-prune policy. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. 
Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- \ No newline at end of file diff --git a/modules/creating-robot-account-api.adoc b/modules/creating-robot-account-api.adoc new file mode 100644 index 000000000..d380f3a0b --- /dev/null +++ b/modules/creating-robot-account-api.adoc @@ -0,0 +1,45 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="creating-robot-account-api"] += Creating a robot account by using the {productname} API + +Use the following procedure to create a robot account using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. 
+ +.Procedure + +* Enter the following command to create a new robot account for an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorgrobot[`PUT /api/v1/organization/{orgname}/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/organization//robots/" +---- ++ +Example output ++ +[source,terminal] +---- +{"name": "orgname+robot-name", "created": "Fri, 10 May 2024 15:11:00 -0000", "last_accessed": null, "description": "", "token": "", "unstructured_metadata": null} +---- + +* Enter the following command to create a new robot account for the current user with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserrobot[`PUT /api/v1/user/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/user/robots/" +---- ++ +Example output ++ +[source,terminal] +---- +{"name": "quayadmin+robot-name", "created": "Fri, 10 May 2024 15:24:57 -0000", "last_accessed": null, "description": "", "token": "", "unstructured_metadata": null} +---- \ No newline at end of file diff --git a/modules/creating-robot-account-v2-ui.adoc b/modules/creating-robot-account-v2-ui.adoc new file mode 100644 index 000000000..36f7a0846 --- /dev/null +++ b/modules/creating-robot-account-v2-ui.adoc @@ -0,0 +1,44 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="creating-robot-account-v2-ui"] += Creating a robot account by using the UI + +Use the following procedure to create a robot account using the v2 UI. + +.Procedure + +. On the v2 UI, click *Organizations*. + +. Click the name of the organization that you will create the robot account for, for example, `test-org`. + +. 
Click the *Robot accounts* tab -> *Create robot account*. + +. In the *Provide a name for your robot account* box, enter a name, for example, `robot1`. The name of your Robot Account becomes a combination of your username plus the name of the robot, for example, `quayadmin+robot1` + +. Optional. The following options are available if desired: + +.. Add the robot account to a team. + +.. Add the robot account to a repository. + +.. Adjust the robot account's permissions. + +. On the *Review and finish* page, review the information you have provided, then click *Review and finish*. The following alert appears: *Successfully created robot account with robot name: + *. ++ +Alternatively, if you tried to create a robot account with the same name as another robot account, you might receive the following error message: *Error creating robot account*. + +. Optional. You can click *Expand* or *Collapse* to reveal descriptive information about the robot account. + +. Optional. You can change permissions of the robot account by clicking the kebab menu -> *Set repository permissions*. The following message appears: *Successfully updated repository permission*. + +. Optional. You can click the name of your robot account to obtain the following information: + +* *Robot Account*: Select this obtain the robot account token. You can regenerate the token by clicking *Regenerate token now*. +* *Kubernetes Secret*: Select this to download credentials in the form of a Kubernetes pull secret YAML file. +* *Podman*: Select this to copy a full `podman login` command line that includes the credentials. +* *Docker Configuration*: Select this to copy a full `docker login` command line that includes the credentials. 
\ No newline at end of file diff --git a/modules/creating-user-account-quay-api.adoc b/modules/creating-user-account-quay-api.adoc new file mode 100644 index 000000000..f60bed00e --- /dev/null +++ b/modules/creating-user-account-quay-api.adoc @@ -0,0 +1,70 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available + +:_content-type: CONCEPT +[id="creating-user-account-quay-api"] += Creating a user account by using the {productname} API + +Use the following procedure to create a new user for your {productname} repository by using the API. + +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to create a new user using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createinstalluser[`POST /api/v1/superuser/users/`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "username": "newuser", + "email": "newuser@example.com" +}' "https:///api/v1/superuser/users/" +---- ++ +Example output ++ +[source,terminal] +---- +{"username": "newuser", "email": "newuser@example.com", "password": "123456789", "encrypted_password": "/JKY9pnDcsw="} +---- + +. Navigate to your {productname} registry endpoint, for example, `quay-server.example.com` and login with the username and password generated from the API call. In this scenario, the username is `newuser` and the password is `123456789`. Alternatively, you can log in to the registry with the CLI. 
For example: ++ +[source,terminal] +---- +$ podman login +---- ++ +.Example output ++ +[source,terminal] +---- +username: newuser +password: 123456789 +---- + +. Optional. You can obtain a list of all users, including superusers, by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listallusers[`GET /api/v1/superuser/users/`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/users/" +---- ++ +[NOTE] +==== +The `GET /api/v1/superuser/users/` endpoint only returns users and superusers if `AUTHENTICATION_TYPE: Database` is set in your `config.yaml` file. It does not work for `LDAP` authentication types. +==== ++ +Example output ++ +[source,terminal] +---- +{"users": [{"kind": "user", "name": "quayadmin", "username": "quayadmin", "email": "quay@quay.com", "verified": true, "avatar": {"name": "quayadmin", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}, "super_user": true, "enabled": true}, {"kind": "user", "name": "newuser", "username": "newuser", "email": "newuser@example.com", "verified": true, "avatar": {"name": "newuser", "hash": "f338a2c83bfdde84abe2d3348994d70c34185a234cfbf32f9e323e3578e7e771", "color": "#9edae5", "kind": "user"}, "super_user": false, "enabled": true}]} +---- \ No newline at end of file diff --git a/modules/creating-user-account-quay-ui.adoc b/modules/creating-user-account-quay-ui.adoc new file mode 100644 index 000000000..f3788e2a9 --- /dev/null +++ b/modules/creating-user-account-quay-ui.adoc @@ -0,0 +1,43 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available:_content-type: CONCEPT + +[id="creating-user-account-quay-ui"] += Creating a user account by using the UI + +Use the following procedure to create a new user for your {productname} repository using the UI. 
+ +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. + +.Procedure + +. Log in to your {productname} repository as the superuser. + +. In the navigation pane, select your account name, and then click *Super User Admin Panel*. + +. Click the *Users* icon in the column. + +. Click the *Create User* button. + +. Enter the new user's Username and Email address, and then click the *Create User* button. + +. You are redirected to the *Users* page, where there is now another {productname} user. ++ +[NOTE] +==== +You might need to refresh the *Users* page to show the additional user. +==== + +. On the *Users* page, click the *Options* cogwheel associated with the new user. A drop-down menu appears, as shown in the following figure: ++ +image:user-options.png[Select Options drop-down to change user passwords] + +. Click *Change Password*. + +. Add the new password, and then click *Change User Password*. ++ +The new user can now use that username and password to log in using the web UI or through their preferred container client, like Podman. \ No newline at end of file diff --git a/modules/creating-v2-oauth-access-token.adoc b/modules/creating-v2-oauth-access-token.adoc new file mode 100644 index 000000000..f409d2234 --- /dev/null +++ b/modules/creating-v2-oauth-access-token.adoc @@ -0,0 +1,46 @@ +:_content-type: PROCEDURE +[id="creating-v2-oauth-access-token"] += Creating an OCI referrers OAuth access token + +This OCI referrers OAuth access token is used to list OCI referrers of a manifest under a repository. + +.Procedure + +. Update your `config.yaml` file to include the `FEATURE_REFERRERS_API: true` field. For example: ++ +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: true +# ... +---- + +. Enter the following command to Base64 encode your credentials: ++ +[source,terminal] +---- +$ echo -n ':' | base64 +---- ++ +.Example output ++ +[source,terminal] +---- +abcdeWFkbWluOjE5ODlraWROZXQxIQ== +---- + +. 
Enter the following command to use the base64 encoded string and modify the URL endpoint to your {productname} server: ++ +[source,terminal] +---- +$ curl --location '/v2/auth?service=&scope=repository:quay/listocireferrs:pull,push' --header 'Authorization: Basic ' -k | jq +---- ++ +.Example output ++ +[source,terminal] +---- +{ + "token": " +} +---- \ No newline at end of file diff --git a/modules/custom-clair-configuration-managed-database.adoc b/modules/custom-clair-configuration-managed-database.adoc index 34f518776..2fd6b1960 100644 --- a/modules/custom-clair-configuration-managed-database.adoc +++ b/modules/custom-clair-configuration-managed-database.adoc @@ -5,7 +5,7 @@ In some cases, users might want to run a custom Clair configuration with a managed Clair database. This is useful in the following scenarios: * When a user wants to disable specific updater resources. -* When a user is running {productname} in an disconnected environment. For more information about running Clair in a disconnected environment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#clair-openshift-airgap-database[Configuring access to the Clair database in the air-gapped OpenShift cluster]. +* When a user is running {productname} in a disconnected environment. For more information about running Clair in a disconnected environment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#clair-disconnected-environments[Clair in disconnected environments]. 
+ [NOTE] ==== diff --git a/modules/database-troubleshooting-issues.adoc b/modules/database-troubleshooting-issues.adoc new file mode 100644 index 000000000..b638e3483 --- /dev/null +++ b/modules/database-troubleshooting-issues.adoc @@ -0,0 +1,261 @@ +:_content-type: CONCEPT +[id="database-troubleshooting-issues"] += Troubleshooting {productname} database issues + +Use the following procedures to troubleshoot the PostgreSQL database. + +//// +[id="checking-deployment-type"] +== Checking the type of deployment + +Check whether your database is deployed as a container on a virtual machine, or deployed on {ocp} as a pod. + +[id="checking-container-pod-status"] +== Checking the container or pod status + +Use the following procedure to check the status of the database pod or container. + +.Procedure + +. Enter the following command to check the status of the pod or container. + +.. If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc get pods +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman ps -a +---- +//// + +[id="interact-with-database"] +== Interacting with the {productname} database + +Use the following procedure to interact with the PostgreSQL database. + +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== + +[NOTE] +==== +Interacting with the PostgreSQL database can also be used to troubleshoot authorization and authentication issues. +==== + +.Procedure + +. Exec into the {productname} database. + +.. Enter the following commands to exec into the {productname} database pod on {ocp}: ++ +[source,terminal] +---- +$ oc exec -it -- psql +---- + +.. 
Enter the following command to exec into the {productname} database on a standalone deployment: ++ +[source,terminal] +---- +$ sudo podman exec -it /bin/bash +---- + +. Enter the PostgreSQL shell. ++ +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== + +.. If you are using the {productname} Operator, enter the following command to enter the PostgreSQL shell: ++ +[source,terminal] +---- +$ oc rsh psql -U your_username -d your_database_name +---- + +.. If you are on a standalone {productname} deployment, enter the following command to enter the PostgreSQL shell: ++ +[source,terminal] +---- +bash-4.4$ psql -U your_username -d your_database_name +---- + +[id="troubleshooting-crashloop-backoff-state"] +== Troubleshooting crashloopbackoff states + +Use the following procedure to troubleshoot `crashloopbackoff` states. + +.Procedure + +. If your container or pod is in a `crashloopbackoff` state, you can enter the following commands. + +.. Enter the following command to scale down the {productname} Operator: ++ +[source,terminal] +---- +$ oc scale deployment/quay-operator.v3.8.z --replicas=0 +---- ++ +.Example output ++ +[source,terminal] +---- +deployment.apps/quay-operator.v3.8.z scaled +---- + +.. Enter the following command to scale down the {productname} database: ++ +[source,terminal] +---- +$ oc scale deployment/ --replicas=0 +---- ++ +.Example output ++ +[source,terminal] +---- +deployment.apps/ scaled +---- + +.. Enter the following command to edit the {productname} database: ++ +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== ++ +[source,terminal] +---- +$ oc edit deployment +---- ++ +[source,yaml] +---- +... 
+ template: + metadata: + creationTimestamp: null + labels: + quay-component: + quay-operator/quayregistry: quay-operator.v3.8.z + spec: + containers: + - env: + - name: POSTGRESQL_USER + value: postgres + - name: POSTGRESQL_DATABASE + value: postgres + - name: POSTGRESQL_PASSWORD + value: postgres + - name: POSTGRESQL_ADMIN_PASSWORD + value: postgres + - name: POSTGRESQL_MAX_CONNECTIONS + value: "1000" + image: registry.redhat.io/rhel8/postgresql-10@sha256:a52ad402458ec8ef3f275972c6ebed05ad64398f884404b9bb8e3010c5c95291 + imagePullPolicy: IfNotPresent + name: postgres + command: ["/bin/bash", "-c", "sleep 86400"] <1> +... +---- +<1> Add this line in the same indentation. ++ +.Example output ++ +[source,terminal] +---- +deployment.apps/ edited +---- + +.. Execute the following command inside of your ``: ++ +[source,terminal] +---- +$ oc exec -it -- cat /var/lib/pgsql/data/userdata/postgresql/logs/* /path/to/desired_directory_on_host +---- + +[id="connectivity-networking"] +== Checking the connectivity between {productname} and the database pod + +Use the following procedure to check the connectivity between {productname} and the database pod + +.Procedure + +. Check the connectivity between {productname} and the database pod. + +.. If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it _quay_pod_name_ -- curl -v telnet://:5432 +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it curl -v telnet://:5432 +---- + +[id="check-resource-allocation"] +== Checking resource allocation + +Use the following procedure to check resource allocation. + +.Procedure + +. Obtain a list of running containers. + +. Monitor disk usage of your {productname} deployment. + +.. If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it -- df -ah +---- + +.. 
If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it df -ah +---- + +. Monitor other resource usage. + +.. Enter the following command to check resource allocation on a {productname} Operator deployment: ++ +[source,terminal] +---- +$ oc adm top pods +---- + +.. Enter the following command to check the status of a specific pod on a standalone deployment of {productname}: ++ +[source,terminal] +---- +$ podman pod stats +---- + +.. Enter the following command to check the status of a specific container on a standalone deployment of {productname}: ++ +[source,terminal] +---- +$ podman stats +---- ++ +The following information is returned: ++ +* *CPU %*. The percentage of CPU usage by the container since the last measurement. This value represents the container's share of the available CPU resources. +* *MEM USAGE / LIMIT*. The current memory usage of the container followed by its memory limit. The values are displayed in the format `current_usage / memory_limit`. For example, `300.4MiB / 7.795GiB` indicates that the container is currently using 300.4 megabytes of memory out of a limit of 7.795 gigabytes. +* *MEM %*. The percentage of memory usage by the container in relation to its memory limit. +* *NET I/O*. The network I/O (input/output) statistics of the container. It displays the amount of data transmitted and received by the container over the network. The values are displayed in the format: `transmitted_bytes / received_bytes`. +* *BLOCK I/O*. The block I/O (input/output) statistics of the container. It represents the amount of data read from and written to the block devices (for example, disks) used by the container. The values are displayed in the format `read_bytes / written_bytes`. 
\ No newline at end of file diff --git a/modules/database-troubleshooting.adoc b/modules/database-troubleshooting.adoc new file mode 100644 index 000000000..642976490 --- /dev/null +++ b/modules/database-troubleshooting.adoc @@ -0,0 +1,33 @@ +:_content-type: CONCEPT +[id="database-troubleshooting"] += Troubleshooting the {productname} database + +The PostgreSQL database used for {productname} stores various types of information related to container images and their management. Some of the key pieces of information that the PostgreSQL database stores include: + +* *Image Metadata*. The database stores metadata associated with container images, such as image names, versions, creation timestamps, and the user or organization that owns the image. This information allows for easy identification and organization of container images within the registry. + +* *Image Tags*. {productname} allows users to assign tags to container images, enabling convenient labeling and versioning. The PostgreSQL database maintains the mapping between image tags and their corresponding image manifests, allowing users to retrieve specific versions of container images based on the provided tags. + +* *Image Layers*. Container images are composed of multiple layers, which are stored as individual objects. The database records information about these layers, including their order, checksums, and sizes. This data is crucial for efficient storage and retrieval of container images. + +* *User and Organization Data*. {productname} supports user and organization management, allowing users to authenticate and manage access to container images. The PostgreSQL database stores user and organization information, including usernames, email addresses, authentication tokens, and access permissions. + +* *Repository Information*. {productname} organizes container images into repositories, which act as logical units for grouping related images. 
The database maintains repository data, including names, descriptions, visibility settings, and access control information, enabling users to manage and share their repositories effectively. + +* *Event Logs*. {productname} tracks various events and activities related to image management and repository operations. These event logs, including image pushes, pulls, deletions, and repository modifications, are stored in the PostgreSQL database, providing an audit trail and allowing administrators to monitor and analyze system activities. + +The content in this section covers the following procedures: + +* *Checking the type of deployment*: Determine if the database is deployed as a container on a virtual machine or as a pod on {ocp}. + +* *Checking the container or pod status*: Verify the status of the `database` pod or container using specific commands based on the deployment type. + +* *Examining the database container or pod logs*: Access and examine the logs of the database pod or container, including commands for different deployment types. + +* *Checking the connectivity between {productname} and the database pod*: Check the connectivity between {productname} and the `database` pod using relevant commands. + +* *Checking the database configuration*: Check the database configuration at various levels ({ocp} or PostgreSQL level) based on the deployment type. + +* *Checking resource allocation*: Monitor resource allocation for the {productname} deployment, including disk usage and other resource usage. + +* *Interacting with the {productname} database*: Learn how to interact with the PostgreSQL database, including commands to access and query databases. 
diff --git a/modules/default-permissions-api.adoc b/modules/default-permissions-api.adoc new file mode 100644 index 000000000..4ddff5cda --- /dev/null +++ b/modules/default-permissions-api.adoc @@ -0,0 +1,87 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="default-permissions-api"] += Creating and managing default permissions by using the API + +Use the following procedures to manage default permissions using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to create a default permission with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationprototypepermission[`POST /api/v1/organization/{orgname}/prototypes`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" --data '{ + "role": "", + "delegate": { + "name": "", + "kind": "user" + }, + "activating_user": { + "name": "" + } + }' https:///api/v1/organization//prototypes +---- ++ +.Example output ++ +[source,terminal] +---- +{"activating_user": {"name": "test-org+test", "is_robot": true, "kind": "user", "is_org_member": true, "avatar": {"name": "test-org+test", "hash": "aa85264436fe9839e7160bf349100a9b71403a5e9ec684d5b5e9571f6c821370", "color": "#8c564b", "kind": "robot"}}, "delegate": {"name": "testuser", "is_robot": false, "kind": "user", "is_org_member": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}}, "role": "admin", "id": "977dc2bc-bc75-411d-82b3-604e5b79a493"} 
+---- + +. Enter the following command to update a default permission using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationprototypepermission[`PUT /api/v1/organization/{orgname}/prototypes/{prototypeid}`] endpoint, for example, if you want to change the permission type. You must include the ID that was returned when you created the policy. ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "role": "write" + }' \ + https:///api/v1/organization//prototypes/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"activating_user": {"name": "test-org+test", "is_robot": true, "kind": "user", "is_org_member": true, "avatar": {"name": "test-org+test", "hash": "aa85264436fe9839e7160bf349100a9b71403a5e9ec684d5b5e9571f6c821370", "color": "#8c564b", "kind": "robot"}}, "delegate": {"name": "testuser", "is_robot": false, "kind": "user", "is_org_member": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}}, "role": "write", "id": "977dc2bc-bc75-411d-82b3-604e5b79a493"} +---- + +. You can delete the permission by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationprototypepermission[`DELETE /api/v1/organization/{orgname}/prototypes/{prototypeid}`] command: ++ +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes/ +---- ++ +This command does not return an output. 
Instead, you can obtain a list of all permissions by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationprototypepermissions[`GET /api/v1/organization/{orgname}/prototypes`] command: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes +---- ++ +.Example output ++ +[source,terminal] +---- +{"prototypes": []} +---- \ No newline at end of file diff --git a/modules/default-permissions-v2-ui.adoc b/modules/default-permissions-v2-ui.adoc new file mode 100644 index 000000000..feaa61a40 --- /dev/null +++ b/modules/default-permissions-v2-ui.adoc @@ -0,0 +1,35 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="default-permissions-v2-ui"] += Creating and managing default permissions by using the UI + +Default permissions define permissions that should be granted automatically to a repository when it is created, in addition to the default of the repository's creator. Permissions are assigned based on the user who created the repository. + +Use the following procedure to create default permissions using the {productname} v2 UI. + +.Procedure + +. Click the name of an organization. + +. Click *Default permissions*. + +. Click *Create default permissions*. A toggle drawer appears. + +. Select either *Anyone* or *Specific user* to create a default permission when a repository is created. + +.. If selecting *Anyone*, the following information must be provided: ++ +* **Applied to**. Search, invite, or add a user/robot/team. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +.. If selecting *Specific user*, the following information must be provided: ++ +* **Repository creator**. Provide either a user or robot account. +* **Applied to**. 
Provide a username, robot account, or team name. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +. Click *Create default permission*. A confirmation box appears, returning the following alert: *Successfully created default permission for creator*. diff --git a/modules/deleting-a-tag-api.adoc b/modules/deleting-a-tag-api.adoc new file mode 100644 index 000000000..540bb025e --- /dev/null +++ b/modules/deleting-a-tag-api.adoc @@ -0,0 +1,40 @@ +:_content-type: PROCEDURE +[id="deleting-tags-api"] += Deleting an image by using the API + +You can delete an old image tag by using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. You can delete an image tag by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletefulltag[`DELETE /api/v1/repository/{repository}/tag/{tag}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository///tag/ +---- ++ +This command does not return output in the CLI. Continue on to the next step to return a list of tags. + +. To see a list of tags after deleting a tag, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": false, "start_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715697708, "end_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:41:48 -0000", "expiration": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715695488, "end_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "expiration": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715631517, "end_ts": 1715695488, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Mon, 13 May 2024 20:18:37 -0000", "expiration": "Tue, 14 May 2024 14:04:48 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/deleting-a-tag.adoc b/modules/deleting-a-tag.adoc new file mode 100644 index 000000000..7615a5ce1 --- /dev/null +++ b/modules/deleting-a-tag.adoc @@ -0,0 +1,29 @@ +:_content-type: CONCEPT 
+[id="deleting-a-tag"] += Deleting an image tag + +Deleting an image tag removes that specific version of the image from the registry. + +To delete an image tag, use the following procedure. + +.Procedure + +. On the *Repositories* page of the v2 UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. ++ +[NOTE] +==== +Deleting an image tag can be reverted based on the amount of time allotted to the _time machine_ feature. For more information, see "Reverting tag changes". +==== \ No newline at end of file diff --git a/modules/deleting-an-image-repository-via-the-api.adoc b/modules/deleting-an-image-repository-via-the-api.adoc new file mode 100644 index 000000000..e404cba65 --- /dev/null +++ b/modules/deleting-an-image-repository-via-the-api.adoc @@ -0,0 +1,38 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="deleting-repository-api"] += Deleting a repository by using the {productname} API + +Use the following procedure to delete a repository using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
Enter the following command to delete a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleterepository[`DELETE /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- + +. The CLI does not return information when deleting a repository from the CLI. To confirm deletion, you can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] command to see if details are returned for the deleted repository: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http://quay-server.example.com/api/v1/error/not_found", "status": 404} +---- \ No newline at end of file diff --git a/modules/deleting-an-image-repository-via-ui.adoc b/modules/deleting-an-image-repository-via-ui.adoc new file mode 100644 index 000000000..b2dd4aac5 --- /dev/null +++ b/modules/deleting-an-image-repository-via-ui.adoc @@ -0,0 +1,27 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="deleting-repository-v2"] += Deleting a repository by using the UI + +You can delete a repository directly on the UI. + +.Prerequisites + +* You have created a repository. + +.Procedure + +. On the *Repositories* page of the v2 UI, check the box of the repository that you want to delete, for example, `quayadmin/busybox`. + +. Click the *Actions* drop-down menu. + +. Click *Delete*. + +. Type *confirm* in the box, and then click *Delete*. 
++ +After deletion, you are returned to the *Repositories* page. \ No newline at end of file diff --git a/modules/deleting-oauth-access-token.adoc b/modules/deleting-oauth-access-token.adoc new file mode 100644 index 000000000..e166139ed --- /dev/null +++ b/modules/deleting-oauth-access-token.adoc @@ -0,0 +1,22 @@ +:_content-type: PROCEDURE +[id="deleting-oauth-access-token"] += Deleting an OAuth 2 access token + +Because OAuth 2 access tokens are created through the OAuth application, they cannot be rotated or renewed. In the event that a token is compromised, or you need to delete a token, you must delete its associated application through the {productname} UI. + +[IMPORTANT] +==== +Deleting an application deletes all tokens that were made within that specific application. Use with caution. +==== + +.Prerequisites + +* You have created an OAuth 2 access token. + +.Procedure + +. On the {productname} UI, click the name of the organization hosting the application. Then, in the navigation pane, click *Applications*. + +. Click the application name, for example, *Test application*. + +. In the navigation pane, click *Delete Application*. You are redirected to a new page. Click *Delete application* and confirm your decision. \ No newline at end of file diff --git a/modules/deleting-robot-account-api.adoc b/modules/deleting-robot-account-api.adoc new file mode 100644 index 000000000..e898a299d --- /dev/null +++ b/modules/deleting-robot-account-api.adoc @@ -0,0 +1,65 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="deleting-robot-account-api"] += Deleting a robot account by using the {productname} API + +Use the following procedure to delete a robot account using the {productname} API. 
+ +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to delete a robot account for an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorgrobot[`DELETE /api/v1/organization/{orgname}/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots/" +---- + +. The CLI does not return information when deleting a robot account with the API. To confirm deletion, you can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorgrobots[`GET /api/v1/organization/{orgname}/robots`] command to see if details are returned for the robot account: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/organization//robots" +---- ++ +Example output ++ +[source,terminal] +---- +{"robots": []} +---- + +. Enter the following command to delete a robot account for the current user with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserrobot[`DELETE /api/v1/user/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- + +. The CLI does not return information when deleting a robot account for the current user with the API. 
To confirm deletion, you can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserrobot[`GET /api/v1/user/robots/{robot_shortname}`] command to see if details are returned for the robot account: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- ++ +Example output ++ +[source,terminal] +---- +{"message":"Could not find robot with specified username"} +---- \ No newline at end of file diff --git a/modules/deleting-robot-account-v2-ui.adoc b/modules/deleting-robot-account-v2-ui.adoc new file mode 100644 index 000000000..fe26e2c7f --- /dev/null +++ b/modules/deleting-robot-account-v2-ui.adoc @@ -0,0 +1,26 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="deleting-robot-account-ui"] += Deleting a robot account by using the UI + +Use the following procedure to delete a robot account using the {productname} UI. + +.Procedure + +. Log into your {productname} registry: + +. Click the name of the Organization that has the robot account. + +. Click *Robot accounts*. + +. Check the box of the robot account to be deleted. + +. Click the kebab menu. + +. Click *Delete*. + +. Type `confirm` into the textbox, then click *Delete*. \ No newline at end of file diff --git a/modules/deleting-tag-permanently.adoc b/modules/deleting-tag-permanently.adoc new file mode 100644 index 000000000..8f6938876 --- /dev/null +++ b/modules/deleting-tag-permanently.adoc @@ -0,0 +1,79 @@ +:_content-type: PROCEDURE +[id="deleting-tag-permanently"] += Permanently deleting an image tag + +In some cases, users might want to delete an image tag outside of the time machine window. Use the following procedure to manually delete an image tag permanently. + +[IMPORTANT] +==== +The results of the following procedure cannot be undone. 
Use with caution. +==== + +[id="permanently-deleting-image-tag-v2-ui"] +== Permanently deleting an image tag using the {productname} v2 UI + +Use the following procedure to permanently delete an image tag using the {productname} v2 UI. + +.Prerequisites + +* You have set `FEATURE_UI_V2` to `true` in your `config.yaml` file. + +.Procedure + +. Ensure that the `PERMANENTLY_DELETE_TAGS` and `RESET_CHILD_MANIFEST_EXPIRATION` parameters are set to `true` in your `config.yaml` file. For example: ++ +[source,yaml] +---- +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. In the navigation pane, click *Repositories*. + +. Click the name of the repository, for example, *quayadmin/busybox*. + +. Check the box of the image tag that will be deleted, for example, *test*. + +. Click *Actions* -> *Permanently Delete*. ++ +[IMPORTANT] +==== +This action is permanent and cannot be undone. +==== + + +[id="permanently-deleting-image-tag-legacy-ui"] +== Permanently deleting an image tag using the {productname} legacy UI + +Use the following procedure to permanently delete an image tag using the {productname} legacy UI. + +.Procedure + +. Ensure that the `PERMANENTLY_DELETE_TAGS` and `RESET_CHILD_MANIFEST_EXPIRATION` parameters are set to `true` in your `config.yaml` file. For example: ++ +[source,yaml] +---- +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. On the {productname} UI, click *Repositories* and the name of the repository that contains the image tag you will delete, for example, *quayadmin/busybox*. + +. In the navigation pane, click *Tags*. + +. Check the box of the name of the tag you want to delete, for example, *test*. + +. Click the *Actions* drop down menu and select *Delete Tags* -> *Delete Tag*. + +. Click *Tag History* in the navigation pane. + +. On the name of the tag that was just deleted, for example, `test`, click *Delete test* under the *Permanently Delete* category. 
For example: ++ +.Permanently delete image tag ++ +image:permanently-delete-image-tag.png[Permanently delete image tag] ++ +[IMPORTANT] +==== +This action is permanent and cannot be undone. +==== diff --git a/modules/deleting-team-within-organization-api.adoc b/modules/deleting-team-within-organization-api.adoc new file mode 100644 index 000000000..e4e51b8ea --- /dev/null +++ b/modules/deleting-team-within-organization-api.adoc @@ -0,0 +1,28 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="deleting-team-within-organization-api"] += Deleting a team within an organization by using the API + +Use the following procedure to delete a team within an organization by using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* You can delete a team within an organization by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationteam[`DELETE /api/v1/organization/{orgname}/team/{teamname}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team/" +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/deleting-user-cli-api.adoc b/modules/deleting-user-cli-api.adoc new file mode 100644 index 000000000..1598bffda --- /dev/null +++ b/modules/deleting-user-cli-api.adoc @@ -0,0 +1,42 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="deleting-user-cli-api"] += Deleting a user by using the {productname} API + +Use the following procedure to delete a user from {productname} using the API. + +[IMPORTANT] +==== +After deleting the user, any repositories that this user had in their private account become unavailable. +==== + +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteinstalluser[`DELETE /api/v1/superuser/users/{username}`] command to delete a user from the command line: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " https:///api/v1/superuser/users/ +---- + +. The CLI does not return information when deleting a user from the CLI. To confirm deletion, you can check the {productname} UI by navigating to *Superuser Admin Panel* -> *Users*, or by entering the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listallusers[`GET /api/v1/superuser/users/`] command. You can then check to see if they are present. 
++ +[NOTE] +==== +The `GET /api/v1/superuser/users/` endpoint only returns users and superusers if `AUTHENTICATION_TYPE: Database` is set in your `config.yaml` file. It does not work for `LDAP` authentication types. +==== ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/users/" +---- diff --git a/modules/deleting-user-cli.adoc b/modules/deleting-user-cli.adoc new file mode 100644 index 000000000..66b858b7b --- /dev/null +++ b/modules/deleting-user-cli.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="deleting-user-cli"] += Deleting a {productname} user + +You can delete a user on the {productname} UI or by using the {productname} API. + +[NOTE] +==== +In some cases, when accessing the *Users* tab in the *Superuser Admin Panel* of the {productname} UI, you might encounter a situation where no users are listed. Instead, a message appears, indicating that {productname} is configured to use external authentication, and users can only be created in that system. + +This error occurs for one of two reasons: + +* The web UI times out when loading users. When this happens, users are not accessible to perform any operations on. +* On LDAP authentication. When a userID is changed but the associated email is not. Currently, {productname} does not allow the creation of a new user with an old email address. + +When this happens, you must delete the user using the {productname} API. +==== \ No newline at end of file diff --git a/modules/deleting-user-ui.adoc b/modules/deleting-user-ui.adoc new file mode 100644 index 000000000..d737ec66c --- /dev/null +++ b/modules/deleting-user-ui.adoc @@ -0,0 +1,38 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="deleting-user-ui"] += Deleting a user by using the UI + +Use the following procedure to delete a user from your {productname} repository using the UI. 
Note that after deleting the user, any repositories that the user had in their private account become unavailable. + +[NOTE] +==== +In some cases, when accessing the *Users* tab in the *Superuser Admin Panel* of the {productname} UI, you might encounter a situation where no users are listed. Instead, a message appears, indicating that {productname} is configured to use external authentication, and users can only be created in that system. + +This error occurs for one of two reasons: + +* The web UI times out when loading users. When this happens, users are not accessible to perform any operations on. +* On LDAP authentication. When a userID is changed but the associated email is not. Currently, {productname} does not allow the creation of a new user with an old email address. + +When this happens, you must delete the user using the {productname} API. +==== + +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. + +.Procedure + +. Log in to your {productname} repository as the superuser. + +. In the navigation pane, select your account name, and then click *Super User Admin Panel*. + +. Click the *Users* icon in the navigation pane. + +. Click the *Options* cogwheel beside the user to be deleted. + +. Click *Delete User*, and then confirm deletion by clicking *Delete User*. \ No newline at end of file diff --git a/modules/deploy-local-quay-ipv6.adoc b/modules/deploy-local-quay-ipv6.adoc new file mode 100644 index 000000000..0ed30e136 --- /dev/null +++ b/modules/deploy-local-quay-ipv6.adoc @@ -0,0 +1,31 @@ +:_content-type: PROCEDURE +[id="deploy-local-quay-ipv6"] += Deploying a local instance of {productname} in IPv6 + +After you have created a new dual-stack container network, you can deploy a local instance of {productname}. + +.Prerequisites + +* You have configured Redis, your PostgreSQL database, local image storage, and creating a `config.yaml` file with the desired settings. + +.Procedure + +. 
In your {productname} `config.yaml` file, set the `FEATURE_LISTEN_IP_VERSION` field to `IPv6`. For example: ++ +[source,yaml] +---- +# ... +FEATURE_LISTEN_IP_VERSION: IPv6 +# ... +---- + +. Enter the following command to start the `Quay` registry container, specifying the appropriate volumes for configuration data and local storage for image data. Note that if you are using dual-stack, you must specify explicit IPv6 port mapping when starting the container. ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p "[::]:80:8080" -p "[::]:443:8443" \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- \ No newline at end of file diff --git a/modules/disable-oci-artifacts-in-quay.adoc b/modules/disable-oci-artifacts-in-quay.adoc index 39f038371..f27e6e510 100644 --- a/modules/disable-oci-artifacts-in-quay.adoc +++ b/modules/disable-oci-artifacts-in-quay.adoc @@ -1,10 +1,14 @@ -[[disable-oci-artifacts-in-quay]] -= Disabling OCI artifacts in Quay +:_content-type: REFERENCE +[id="disable-oci-artifacts-in-quay"] += Disabling OCI artifacts in {productname} -If you want to disable OCI artifact support, you can set `FEATURE_GENERAL_OCI_SUPPORT` to `False` in your config.yaml: +Use the following procedure to disable support for OCI artifacts. -.... -... -FEATURE_GENERAL_OCI_SUPPORT = False -... -.... +.Procedure + +* Disable OCI artifact support by setting `FEATURE_GENERAL_OCI_SUPPORT` to `false` in your `config.yaml` file. 
For example: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: false +---- \ No newline at end of file diff --git a/modules/disabling-robot-account.adoc b/modules/disabling-robot-account.adoc new file mode 100644 index 000000000..c0a2cf5cb --- /dev/null +++ b/modules/disabling-robot-account.adoc @@ -0,0 +1,123 @@ +:_content-type: CONCEPT +[id="disabling-robot-account"] += Disabling robot accounts by using the UI + +{productname} administrators can manage robot accounts by disallowing users to create new robot accounts. + +[IMPORTANT] +==== +Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` breaks mirroring configurations. Users mirroring repositories should not set `ROBOTS_DISALLOW` to `true` in their `config.yaml` file. This is a known issue and will be fixed in a future release of {productname}. +==== + +//// +Additionally, {productname} administrators can add robot accounts to allowlists when disallowing the creation of new robot accounts. This ensures operability of approved robot accounts and a seamless workflow in mirroring configurations. + + +[IMPORTANT] +==== +Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` without allowlisting supplementary robot accounts breaks mirroring configurations. You must allowlist robot accounts with the `ROBOTS_WHITELIST` variable when managing robot accounts with the `ROBOTS_DISALLOW` field. +==== +//// + +Use the following procedure to disable robot account creation. + +.Prerequisites + +* You have created multiple robot accounts. + +.Procedure + +. Update your `config.yaml` file to add the `ROBOTS_DISALLOW` variable, for example: ++ +[source,yaml] +---- +ROBOTS_DISALLOW: true +---- + +. Restart your {productname} deployment. + +.Verification: Creating a new robot account + +. Navigate to your {productname} repository. + +. Click the name of a repository. + +. 
In the navigation pane, click *Robot Accounts*. + +. Click *Create Robot Account*. + +. Enter a name for the robot account, for example, `+`. + +. Click *Create robot account* to confirm creation. The following message appears: `Cannot create robot account. Robot accounts have been disabled. Please contact your administrator.` + +//// +.Verification: Pushing an image with an allowlisted robot account + +. On the command-line interface (CLI) log in as one of the allowlisted robot accounts by entering the following command: ++ +[source,terminal] +---- +$ podman login -u="+" -p="KETJ6VN0WT8YLLNXUJJ4454ZI6TZJ98NV41OE02PC2IQXVXRFQ1EJ36V12345678" +---- + +. Enter the following command to pull an example image: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test +---- + +. Push the image by entering the following command: ++ +[source,terminal] +---- +$ podman push --tls-verify=false //busybox:test +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 702a604e206f skipped: already exists +Copying config a416a98b71 done +Writing manifest to image destination +Storing signatures +---- +//// + +.Verification: Logging into a robot account + +. On the command-line interface (CLI), attempt to log in as one of the robot accounts by entering the following command: ++ +[source,terminal] +---- +$ podman login -u="+" -p="KETJ6VN0WT8YLLNXUJJ4454ZI6TZJ98NV41OE02PC2IQXVXRFQ1EJ36V12345678" +---- ++ +The following error message is returned: ++ +[source,terminal] +---- +Error: logging into "": invalid username/password +---- + +. You can pass in the `log-level=debug` flag to confirm that robot accounts have been deactivated: ++ +[source,terminal] +---- +$ podman login -u="+" -p="KETJ6VN0WT8YLLNXUJJ4454ZI6TZJ98NV41OE02PC2IQXVXRFQ1EJ36V12345678" --log-level=debug +---- ++ +[source,terminal] +---- +... 
+DEBU[0000] error logging into "quay-server.example.com": unable to retrieve auth token: invalid username/password: unauthorized: Robot accounts have been disabled. Please contact your administrator. +---- \ No newline at end of file diff --git a/modules/discovering-quay-api-endpoints.adoc b/modules/discovering-quay-api-endpoints.adoc new file mode 100644 index 000000000..f0ab60fb3 --- /dev/null +++ b/modules/discovering-quay-api-endpoints.adoc @@ -0,0 +1,30 @@ +:_content-type: PROCEDURE +[id="discovering-quay-api-endpoints"] += Discovering {productname} API endpoints + +{productname} API endpoints are discoverable by using the API. + +Use the following procedure to discover available API endpoints. + +.Prerequisites + +* You have created an OAuth 2 access token. + +.Procedure + +* Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#discovery_2[`GET /api/v1/discovery`] command to list all of the API endpoints available in the swagger API format: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/discovery?query=true" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +--- +: "Manage the tags of a repository."}, {"name": "team", "description": "Create, list and manage an organization's teams."}, {"name": "trigger", "description": "Create, list and manage build triggers."}, {"name": "user", "description": "Manage the current user."}, {"name": "userfiles", "description": ""}]} +--- +---- diff --git a/modules/docker-failing-pulls.adoc b/modules/docker-failing-pulls.adoc new file mode 100644 index 000000000..b110ccab6 --- /dev/null +++ b/modules/docker-failing-pulls.adoc @@ -0,0 +1,34 @@ +:_content-type: CONCEPT +[id="docker-failing-pulls"] += Docker resulting in failing pulls + +In some cases, using `docker pull` might return the following error: `39cb5a2eab5d: Error pulling image (myimage) from quay.io/my/repository. . . 
Could not find repository on any of the indexed registries.` There are two reasons for receiving this error. + +* *Linux kernel bug on Ubuntu Precise Pangolin (12.04 LTS) (64-bit).* Precise has a Linux kernel bug that must be updated to use Docker. Use the following commands to update and reboot Precise. ++ +[source,terminal] +---- +$ sudo apt-get update +---- ++ +[source,terminal] +---- +$ sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring +---- ++ +[source,terminal] +---- +$ sudo reboot +---- + +* *Missing AUFS on Raring 13.04 and Saucy 13.10 (64-bit).* Not all installations of Ubuntu 13.04 or 13.10 include AUFS enabled. Enter the following commands to install additional Linux kernel modules: ++ +[source,terminal] +---- +$ sudo apt-get update +---- ++ +[source,terminal] +---- +$ sudo apt-get install linux-image-extra-`uname -r` +---- \ No newline at end of file diff --git a/modules/docker-io-timeout.adoc b/modules/docker-io-timeout.adoc new file mode 100644 index 000000000..ffeb7d17d --- /dev/null +++ b/modules/docker-io-timeout.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="docker-io-timeout"] += Docker is returning an i/o timeout + +In some cases, interacting with a remote repository yields an i/o timeout. For example: + +[source,terminal] +---- +$ sudo docker pull ... +---- +.Example output +[source,terminal] +---- +FATA[0021] Error response from daemon: v1 ping attempt failed with error: Get https://quay.io/v1/_ping: dial tcp: i/o timeout. +---- + +If you are running an older version of Docker, for example, 1.7 or before, this issue was related to DNS. Try restarting the Docker daemon process. If that solution does not work, you can try rebooting. 
For more information about this issue, see link:https://github.com/docker/docker/issues/13337[Ambiguous i/o timeouts] + +If you are running Docker 1.8 or greater, the issue is related to network infrastructure, and is likely the product of latency between the client and the registry. Ensure that there are no proxies in between the client and the registry, and that the two are geographically close, to resolve the issue. \ No newline at end of file diff --git a/modules/docker-login-error.adoc b/modules/docker-login-error.adoc new file mode 100644 index 000000000..81e7dd698 --- /dev/null +++ b/modules/docker-login-error.adoc @@ -0,0 +1,21 @@ +:_content-type: CONCEPT +[id="docker-login-error"] += Docker login is failing + +In some cases, Docker fails with the following error: `2014/01/01 12:00:00 Error: Invalid Registry endpoint: Get https://quay.io/v1/_ping: dial tcp: ping timeout`. This occurs for one of three reasons: + +* *You are on a high-latency, slow connection.* Docker has defined a maximum timeout of five seconds before timeout occurs. Currently, the only solution is to find a connection with better latency. + +* *Docker on OSX (through boot2docker) is out of sync.* If you are using Docker on OSX through link:https://github.com/boot2docker/boot2docker[boot2docker], the networking stack can get out of sync. To fix it, restart the `boot2docker` image. 
For example: ++ +[source,terminal] +---- +$ boot2docker restart +---- ++ +Alternatively, because Docker-machine supersedes boot2docker on OSX, you might need to restart Docker-machine: ++ +[source,terminal] +---- +$ docker-machine restart default +---- \ No newline at end of file diff --git a/modules/docker-timestamp-error.adoc b/modules/docker-timestamp-error.adoc new file mode 100644 index 000000000..dd6068466 --- /dev/null +++ b/modules/docker-timestamp-error.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="docker-timestamp-error"] += Incorrect timestamp + +In some cases, using `docker push` might show the incorrect timestamp for one or more images. In most cases, this means that your machine or virtual machine's time has become desynchronized. + +This occurs because the timestamp shown for the _Changed_ file is generated by the Docker client when the image is created. If the time on the machine on which the image was built is out of sync, the timestamp shown is different on {productname} as well. Usually, this means that your machine needs to be synced with the link:http://www.ntp.org/[Network Time Protocol]. + +Enter the following command to force the Docker virtual machine to synchronize its clock: +[source,terminal] +---- +$ docker ssh -C 'sudo ntpclient -s -h pool.ntp.org' +---- \ No newline at end of file diff --git a/modules/downgrade-quay-deployment.adoc b/modules/downgrade-quay-deployment.adoc index 63934f2bf..502d09e9a 100644 --- a/modules/downgrade-quay-deployment.adoc +++ b/modules/downgrade-quay-deployment.adoc @@ -3,7 +3,7 @@ [id="downgrade-quay-deployment"] = Downgrading {productname} -{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, 3.7.2 -> 3.7.1. Rolling back to previous y-stream versions (3.7.0 -> 3.6.0) is not supported. This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. 
Database schema upgrades are not considered backwards compatible. +{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, 3.12.3 -> 3.12.2. Rolling back to previous y-stream versions ({producty} -> {producty-n1}) is not supported. This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. Database schema upgrades are not considered backwards compatible. [IMPORTANT] ==== diff --git a/modules/enabling-team-sync-oidc.adoc b/modules/enabling-team-sync-oidc.adoc new file mode 100644 index 000000000..edcb0d444 --- /dev/null +++ b/modules/enabling-team-sync-oidc.adoc @@ -0,0 +1,214 @@ +:_content-type: PROCEDURE +[id="oidc-team-sync"] += Team synchronization for {productname} OIDC deployments + +Administrators can leverage an OpenID Connect (OIDC) identity provider that supports group or team synchronization to apply repository permissions to sets of users in {productname}. This allows administrators to avoid having to manually create and sync group definitions between {productname} and the OIDC group. + +:_content-type: PROCEDURE +[id="enabling-oidc-team-sync"] +== Enabling synchronization for {productname} OIDC deployments + +Use the following procedure to enable team synchronization when your {productname} deployment uses an OIDC authenticator. + +[IMPORTANT] +==== +The following procedure does not use a specific OIDC provider. Instead, it provides a general outline of how best to approach team synchronization between an OIDC provider and {productname}. Any OIDC provider can be used to enable team synchronization, however, setup might vary depending on your provider. +==== + +.Procedure + +. Update your `config.yaml` file with the following information: ++ +[source,yaml] +---- +AUTHENTICATION_TYPE: OIDC +# ... 
+OIDC_LOGIN_CONFIG: + CLIENT_ID: <1> + CLIENT_SECRET: <2> + OIDC_SERVER: <3> + SERVICE_NAME: <4> + PREFERRED_GROUP_CLAIM_NAME: <5> + LOGIN_SCOPES: [ 'openid', '' ] <6> + OIDC_DISABLE_USER_ENDPOINT: false <7> +# ... +FEATURE_TEAM_SYNCING: true <8> +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: true <9> +FEATURE_UI_V2: true +# ... +---- +<1> Required. The registered OIDC client ID for this {productname} instance. +<2> Required. The registered OIDC client secret for this {productname} instance. +<3> Required. The address of the OIDC server that is being used for authentication. This URL should be such that a `GET` request to `/.well-known/openid-configuration` returns the provider's configuration information. This configuration information is essential for the relying party (RP) to interact securely with the OpenID Connect provider and obtain necessary details for authentication and authorization processes. +<4> Required. The name of the service that is being authenticated. +<5> Required. The key name within the OIDC token payload that holds information about the user's group memberships. This field allows the authentication system to extract group membership information from the OIDC token so that it can be used with {productname}. +<6> Required. Adds additional scopes that {productname} uses to communicate with the OIDC provider. Must include `'openid'`. Additional scopes are optional. +<7> Whether to allow or disable the `/userinfo` endpoint. If using Azure Entra ID, set this field to `true`. Defaults to `false`. +<8> Required. Whether to allow for team membership to be synced from a backing group in the authentication engine. +<9> Optional. If enabled, non-superusers can setup team synchronization. + +. Restart your {productname} registry. + +[id="setting-up-quay-team-sync"] +== Setting up your {productname} deployment for team synchronization + +. Log in to your {productname} registry via your OIDC provider. + +. 
On the {productname} v2 UI dashboard, click *Create Organization*. + +. Enter an Organization name, for example, `test-org`. + +. Click the name of the Organization. + +. In the navigation pane, click *Teams and membership*. + +. Click *Create new team* and enter a name, for example, `testteam`. + +. On the *Create team* pop-up: + +.. Optional. Add this team to a repository. +.. Add a team member, for example, `user1`, by typing in the user's account name. +.. Add a robot account to this team. This page provides the option to create a robot account. + +. Click *Next*. + +. On the *Review and Finish* page, review the information that you have provided and click *Review and Finish*. + +. To enable team synchronization for your {productname} OIDC deployment, click *Enable Directory Sync* on the *Teams and membership* page. + +. You are prompted to enter the group Object ID if your OIDC authenticator is Azure Entra ID, or the group name if using a different provider. Note the message in the popup: ++ +[WARNING] +==== +Please note that once team syncing is enabled, the membership of users who are already part of the team will be revoked. OIDC group will be the single source of truth. This is a non-reversible action. Team's user membership from within Quay will be read-only. +==== + +. Click *Enable Sync*. + +. You are returned to the *Teams and membership* page. Note that users of this team are removed and are re-added upon logging back in. At this stage, only the robot account is still part of the team. ++ +A banner at the top of the page confirms that the team is synced: ++ +[source,text] +---- +This team is synchronized with a group in OIDC and its user membership is therefore read-only. +---- ++ +By clicking the *Directory Synchronization Config* accordion, the OIDC group that your deployment is synchronized with appears. + +. Log out of your {productname} registry and continue on to the verification steps. 
+ +.Verification + +Use the following verification procedure to ensure that `user1` appears as a member of the team. + +. Log back in to your {productname} registry. + +. Click *Organizations* -> *test-org* -> *test-team* -> *Teams and memberships*. `user1` now appears as a team member for this team. + +.Verification + +Use the following procedure to remove `user1` from a group via your OIDC provider, and subsequently remove them from the team on {productname}. + +. Navigate to your OIDC provider's administration console. + +. Navigate to the *Users* page of your OIDC provider. The name of this page varies depending on your provider. + +. Click the name of the user associated with {productname}, for example, `user1`. + +. Remove the user from the group in the configured identity provider. + +. Remove, or unassign, the access permissions from the user. + +. Log in to your {productname} registry. + +. Click *Organizations* -> *test-org* -> *test-team* -> *Teams and memberships*. `user1` has been removed from this team. + +//// +[id="setting-up-keycloak-oidc-team-sync"] +== Setting up Keycloak for OIDC team synchronization + +Keycloak is an open source software product to allow single sign-on with identity and access management. It can be leveraged with {productname} as an extra layer of security for your deployment. + +Use the following procedure to set up Keycloak for {productname} team synchronization. + +.Procedure + +. Log in to your Keycloak administration console. + +. In the navigation pane, click the drop down menu, and then click *Create realm*. + +. Provide a realm name, for example, `quayrealm`. + +. Click *Clients* -> *Create client*. + +. On the *General settings* page: + +.. Set the Client type to *OpenID Connect*. +.. Provide a Client ID, for example, `quaydev`. +.. Optional. Provide a name for the client. +.. Optional. Provide a description for the client. +.. Optional. Specify whether the client is always listed in the Account UI. + +. Click *Next*. + +. 
On the *Capability config* page: + +.. Ensure that *Client authentication* is on. +.. Optional. Turn *Authorization* on. +.. For *Authentication flow*, click *Standard flow* and *Direct access grants*. + +. Click *Next*. + +. On the *Login settings* page: + +.. Optional. Provide a Root URL. +.. Optional. Provide a Home URL. +.. Optional. Provide Valid redirect URIs. +.. Optional. Provide Valid post logout redirect URIs. +.. Optional. Provide Web origins. + +. Click *Save*. You are redirected to the *quaydev* *Settings* page. + +. In the navigation pane, click *Realm roles* -> *Create role*. + +. Enter a role name, for example, `test-team-sync`. Then, click *Save*. + +. In the navigation pane, click *Groups* -> *Create a group*. + +. Enter a name for the group, for example, `oidc-sync-test`. + +. In the navigation pane, click *Users* -> *Create new user*. + +. Enter a username, for example, `test`. + +. Click *Join Groups* and add this user to the `oidc-sync-test` group. + +. Click *Create*. + +. In the navigation pane, click *Clients*. + +. Click the name of the Client ID created earlier, for example, *quay-dev*. + +. On the *Client details* page, click *Client scopes*. + +. Click name of the client scope ID, for example, *quaydev-dedicated*. + +. Click *Configure a new mapper*. This mapper allows groups to be returned from the user information endpoint. + +. Select *User Realm Role*. + +. On the *Add mapper* page, provide the following information: + +.. Enter a name for the mapper, for example, `group`. +.. Enter a Token Claim Name, for example, `groupName`. User groups are returned under this key name. It is used in your {productname} configuration. +.. Click to turn Add to ID token `Off`. +.. Click to turn Add to access token `Off`. +.. Ensure that Add to userinfo is `On`. + +. Click *Save*. 
+ + +[id="configuring-oidc-team-synchronization"] +== Configuring team synchronization for OIDC deployments +//// \ No newline at end of file diff --git a/modules/enabling-using-the-api.adoc b/modules/enabling-using-the-api.adoc new file mode 100644 index 000000000..6e573d44a --- /dev/null +++ b/modules/enabling-using-the-api.adoc @@ -0,0 +1,20 @@ +:_content-type: REFERENCE +[id="enabling-using-the-api"] += Enabling and using the {productname} API + +By leveraging the {productname} API, you can streamline container registry management, automate tasks, and integrate {productname}'s functionalities into your existing workflow. This can improve efficiency, offer enhanced flexibility (by way of repository management, user management, user permissions, image management, and so on), increase the stability of your organization, repository, or overall deployment, and more. + +ifeval::["{context}" == "use-quay"] +Detailed instructions for how to use the {productname} API can be found in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API guide]. In that guide, the following topics are covered: + +* {productname} token types, including OAuth 2 access tokens, robot account tokens, and OCI referrers tokens, and how to generate these tokens. +* Enabling the {productname} API by configuring your `config.yaml` file. +* How to use the {productname} API by passing in your OAuth 2 account token into the desired endpoint. +* API examples, including one generic example of how an administrator might automate certain tasks. + +See the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API guide] before attempting to use the API endpoints offered in this chapter. +endif::[] + +ifeval::["{context}" == "use-api"] +The following sections explain how to enable and use the {productname} API. 
+endif::[] diff --git a/modules/error-403-troubleshooting.adoc b/modules/error-403-troubleshooting.adoc new file mode 100644 index 000000000..e8e77b07b --- /dev/null +++ b/modules/error-403-troubleshooting.adoc @@ -0,0 +1,40 @@ +:_content-type: CONCEPT +[id="error-403-troubleshooting"] += Troubleshooting HTTP status code 403 + +HTTP status code `403` occurs when a user does not have the necessary permissions to access certain resources of the server, such as files, directories, API endpoints, or authenticated content. For some users, this might occur when attempting to push or pull content from {productname}, even after successfully logging in with Docker or Podman. + +Use the following sections to troubleshoot the various reasons for receiving an HTTP status code `403`. + +[id="centos-seven"] +== CentOS 7 + +CentOS 7, released 2014-07-07, introduced a custom build of Docker with a known issue that prevents logging into private registries. As a workaround for this issue, upgrade CentOS to version 8, or upgrade your version of Docker. + +For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1209439[Docker fails to authenticate against additional registries]. + +[id="docker-zero-eight-one"] +== Docker version 0.8.1 + +Docker version 0.8.1 introduced a bug in its storage of authentication credentials in the `.dockercfg` file that resulted in no credentials being sent to Quay.io, despite a successful login. + +As a workaround for this issue, upgrade your version of Docker. + +For more information, see link:https://github.com/moby/moby/issues/4267[Dockercfg registry endpoint format invalid in v0.8.1]. + +[id="docker-execution-environment"] +== Docker is being executed in a different environment + +Docker stores the credentials that it uses for pushing and pulling in a file that is usually placed in the `$HOME/.docker/config.json` folder. 
If you are executing Docker in another environment, such as a scripted `docker build`, a virtual machine, `makefile`, `virtualenv`, and so on, Docker cannot find the `config.json` file and fails. + +As a workaround, verify that the `config.json` file is accessible to the environment which is performing the push or pull commands. + +[id="repository-permissions"] +== Insufficient repository permissions + +Ensure that your user, robot account, or token has the necessary permissions on the repository. Permissions on a repository can be edited from the *Settings* -> *Repository settings* page. + +[NOTE] +==== +If you are trying to pull or push an organization repository, your account must either have the correct permissions, or you must be a member of a team. +==== \ No newline at end of file diff --git a/modules/error-406-dockerfile.adoc b/modules/error-406-dockerfile.adoc new file mode 100644 index 000000000..446ef1000 --- /dev/null +++ b/modules/error-406-dockerfile.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="error-406-dockerfile"] += Base image pull in Dockerfile fails with HTTP error 403 + +In some cases, you might receive an HTTP error `403` when attempting to use a private base image as the `FROM` line in a Build Trigger. To use a private base image as the `FROM` line in a Build Trigger, credentials for your robot account with _read access to the private image_ must be specified when setting up the Build Trigger. + +For more information about robot accounts and Build Triggers, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#github-build-triggers[Setting up GitHub Build Trigger tags]. 
\ No newline at end of file diff --git a/modules/error-429-troubleshooting.adoc b/modules/error-429-troubleshooting.adoc new file mode 100644 index 000000000..2e8e5e7a7 --- /dev/null +++ b/modules/error-429-troubleshooting.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="error-429-troubleshooting"] += Troubleshooting HTTP status code 429 + +HTTP status code `429` indicates that the user has sent too many requests in a given amount of time. If you are receiving this status code, it means that {productname} has reached its maximum capacity for requests per-second, per-IP address. To resolve this issue, you can take the following steps: + +* Reduce the frequency or pace at which you are sending requests to your {productname} registry. This helps ensure that you stay within the allowed limits and avoid triggering a `429` response. + +* Implement a back-off strategy to wait and retry the request after a certain period of time. Back-off strategies involve increasing the waiting time between subsequent requests. This gives the server enough time to process previous requests, which avoids overwhelming the server. + +* Use caching mechanisms to store and reuse frequently accessed data from the {productname} registry. This can help reduce the need for repeated requests and improve overall performance. \ No newline at end of file diff --git a/modules/error-500-troubleshooting.adoc b/modules/error-500-troubleshooting.adoc new file mode 100644 index 000000000..e6b4a0d3f --- /dev/null +++ b/modules/error-500-troubleshooting.adoc @@ -0,0 +1,31 @@ +:_content-type: CONCEPT +[id="error-500-troubleshooting"] += Troubleshooting HTTP status code 500 + +In some cases, users are unable to push or pull images from their {productname} registry, or cannot access the {productname} web UI. The received error message, HTTP error `500`, indicates that the database connections are exhausted. 
As a result, the database influences the service key renewal that is used for internal communication and the signing of requests made to the Docker v2 API. Consequently, the registry falls back to the Docker v1 API, which has been deprecated, and returns HTTP error `500`. + +To resolve this issue, you can increase the database connection count by using the following procedure. + +.Procedure + +. Optional. For an immediate solution, you can force start the `Quay` container. Restarting the container helps resolve the issue because, on each restart, {productname} creates a new service key. These keys have a life of 2 hours and are regularly rotated. + +. Navigate to your `/var/lib/pgsql/data/postgresql.conf` file. + +. Increase the database connection count by updating the `max_connections` variable. It is recommended to set the number of connections on the database to at least `1000` for a development cluster, and `2000` for a production cluster. In some cases you might need more. For example: ++ +[source,text] +---- +max_connections = 1000 +---- ++ +[IMPORTANT] +==== +You should consult with your database team before making any changes to this field. +==== + + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6988741[Troubleshooting Quay Database]. diff --git a/modules/error-502-troubleshooting.adoc b/modules/error-502-troubleshooting.adoc new file mode 100644 index 000000000..0ef77839b --- /dev/null +++ b/modules/error-502-troubleshooting.adoc @@ -0,0 +1,172 @@ +:_content-type: CONCEPT +[id="error-502-troubleshooting"] += Troubleshooting HTTP status code 502 + +In some cases, {productname} users might receive the following HTTP status code when attempting to pull or push an image with Podman: `invalid status code from registry 502 (Bad Gateway)`. Code `502` indicates a problem with the communication between two servers. 
This error commonly occurs when a server acting as a gateway or a proxy receives an invalid response from an upstream server. + +The primary solution when receiving this error is to restart your {productname} deployment to clear locked-up worker nodes, clean up temporary files or caches, or to resolve other transient issues. Restarting {productname} can help resolve many problematic states. In some cases, more thorough troubleshooting must be done. + +[id="restart-standalone-quay"] +== Restarting a standalone {productname} deployment + +Use the following procedure to restart a standalone {productname} deployment. + +.Procedure + +* Enter the following command to restart your {productname} container: ++ +[source,terminal] +---- +$ podman restart <container_name> +---- + +[id="restart-quay-operator"] +== Restarting the {productname} Operator + +Use the following procedure to restart your {productname} Operator. + +* Enter the following command to restart your {productname} Operator: ++ +[source,terminal] +---- +$ oc delete pod quay-app +---- + +[id="integer-502-issue"] +== Integer out of range + +Some `502` error messages might occur because the garbage collection worker fails when collecting images whose value is too high. Other `502` error messages might occur because PostgreSQL cannot handle an integer bigger than 2147483647, which causes the garbage collection worker to fail. Running {productname} in debug mode can reveal additional information about the `502` error. + +If debug mode reveals the error `peewee.DataError: integer out of range`, it means there is an issue with the range of an integer value in the context of the Peewee Object-Relational Mapping (ORM) library. This error occurs when an attempt is made to store an integer value that is outside of the valid range for the corresponding database column; this is often caused when a user sets their tag expiration too high, which causes the garbage collection worker to fail when collecting images. 
Each database has its own limits on the range of integer values it can store. PostgreSQL can store values from -2147483648 to 2147483647. + +If you run {productname} in debug mode and the error `peewee.DataError: integer out of range` is returned, use the following steps for troubleshooting. + +.Procedure + +. In most cases, a {productname} administrator can resolve this error by setting the `FEATURE_CHANGE_TAG_EXPIRATION` configuration field to `false` in their `config.yaml` file. ++ +[NOTE] +==== +This change affects all users of your organization and prevents them from setting tag expirations themselves. +==== + +. Alternatively, you can request the user or owner of the repository in question to either remove, or change, the tag expiration manually. If they do not respond, you can execute the following steps: ++ +** Obtain information from the user table: ++ +[source,terminal] +---- +$ SELECT username, removed_tag_expiration_s FROM "user" WHERE id = (SELECT namespace_user_id FROM repository WHERE id = <repository_id>); +---- ++ +** Update the user or owner of the repository in question and set the `default tag expiration` for that user to two weeks: ++ +[source,terminal] +---- +$ UPDATE "user" SET removed_tag_expiration_s = 1209600 WHERE id = (SELECT namespace_user_id FROM repository WHERE id = <repository_id>); +---- + +[id="troubleshooting-502-pull"] +== Troubleshooting 502 Podman pull errors + +In some cases, the following error might be returned when using `podman pull`: `Error: error pulling image " /:": unable to pull /:: unable to pull image: Error parsing image configuration: Error fetching blob: invalid status code from registry 502 (Bad Gateway)`. This error primarily occurred in {productname} versions 3.7 and earlier. It has been resolved in {productname} 3.7.1 and later. + +If you are using an earlier version of {productname}, the error occurs because the installation script `cert_install.sh` does not have a new line at the end of the file. 
To resolve this issue, you can manually add a new line at the end of your `cert_install.sh` file by simply going to the end of the file, pressing `enter`, saving the file, and then reuploading it to {productname}. + +[id="troubleshooting-502-push"] +== Troubleshooting 502 Podman push errors + +In some cases, the following error might be returned when using `podman push`: `Error: Error writing blob: Error initiating layer upload to /v2/repo/image/blobs/uploads/ in : received unexpected HTTP status: 502 Bad Gateway`. This issue is caused by either the NooBaa certificate rotation, or the service signing root CA rotation. The workaround for this issue is to manually add a new certificate chain to {productname}'s deployment after it has rotated. + +.Procedure + +. Download the new certificate chain for your NooBaa endpoint by entering the following command: ++ +[source,terminal] +---- +$ oc exec -it quay-quay-pod-name -- openssl s_client -connect s3.openshift-storage.svc.cluster.local:443 -showcerts 2>/dev/null > extra_ca_certs_noobaa.crt +---- ++ +.Example output ++ +[source,terminal] +---- +-----BEGIN CERTIFICATE----- +MIIFRjCCBC6gAwIBAgIUKd8q... +-----END CERTIFICATE----- +---- + +. Locate the custom config bundle secret that the Operator is using to deploy {productname} by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry name-of-registry -o yaml | grep -i custom +---- + +. On the {ocp} console, locate the namespace where the {productname} Operator is deployed. Click *Workloads* -> *Secret* in the navigation pane to find the custom config bundle secret. + +. Open the secret and set it to *Editing* mode by clicking *Actions* -> *Edit* on the navigation pane. + +. Scroll to the end of the file and create a new key named `extra_ca_certs_noobaa.crt`. Paste the certificate generated in Step 1 of this procedure inside of the secret. + +. Save the file and let the {productname} Operator recycle the deployment. 
If reconciliation does not happen immediately, delete the Operator pod name and let it restart. For example: ++ +[source,terminal] +---- +$ oc delete pod quay-operator-xxxxx-xxxxxxxx -n <1> +---- + +For more information about this issue, see link:https://issues.redhat.com/browse/PROJQUAY-5174[PROJQUAY-5174]. + +[id="troubleshooting-502-unmanaged-storage"] +== Troubleshooting 502 errors when using unmanaged storage + +In some cases, pulling an image from a {productname} registry that is using RadosGW or Noobaa as an unmanaged object storage returns the following error: `parsing image configuration 502 (Bad Gateway):`. Use the following steps to resolve this issue. + +.Procedure + +. In your `config.yaml` file, update the `DISTRIBUTED_STORAGE_CONFIG` field. + +.. If you are using RadosGW storage: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + radosGWStorage: + - RadosGWStorage + - access_key: xxx + secret_key: xxx + bucket_name: xxx + hostname: rook-ceph-rgw-ocs-storagecluster-cephobjectstore.openshift-storage.svc.cluster.local + is_secure: true + port: 443 + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_PREFERENCE: + - radosGWStorage +---- + +.. If you are using NooBaa storage: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - RHOCSStorage + - access_key: xxx + bucket_name: xxx + hostname: s3.openshift-storage.svc.cluster.local + is_secure: true + port: "443" + secret_key: xxx + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- ++ +With these updates, you should be able to successfully pull images when using unmanaged object storage. 
+ +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6998878[ Podman pull/push fails with 502 http code in QUAY] diff --git a/modules/external-registry-config-api-example.adoc b/modules/external-registry-config-api-example.adoc index a387f825e..3211fc31e 100644 --- a/modules/external-registry-config-api-example.adoc +++ b/modules/external-registry-config-api-example.adoc @@ -1,7 +1,7 @@ :_content-type: CONCEPT [id="external-registry-config-api-example"] -=== external_registry_config object reference += external_registry_config object reference [source,yaml] ---- diff --git a/modules/fetching-images-and-tags.adoc b/modules/fetching-images-and-tags.adoc new file mode 100644 index 000000000..0bd5eedc0 --- /dev/null +++ b/modules/fetching-images-and-tags.adoc @@ -0,0 +1,41 @@ +:_content-type: CONCEPT +[id="fetching-images-and-tags"] += Fetching an image by tag or digest + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers multiple ways of pulling images using Docker and Podman clients. + +.Procedure + +. Navigate to the *Tags* page of a repository. + +. Under *Manifest*, click the *Fetch Tag* icon. + +. When the popup box appears, users are presented with the following options: ++ +* Podman Pull (by tag) +* Docker Pull (by tag) +* Podman Pull (by digest) +* Docker Pull (by digest) ++ +Selecting any one of the four options returns a command for the respective client that allows users to pull the image. + +. Click *Copy Command* to copy the command, which can be used on the command-line interface (CLI). 
For example: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman pull quay.io/quayadmin/busybox:test2 +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ podman pull quay-server.example.com/quayadmin/busybox:test2 +---- +endif::[] diff --git a/modules/fips-overview.adoc b/modules/fips-overview.adoc index 666f86408..4f720da3f 100644 --- a/modules/fips-overview.adoc +++ b/modules/fips-overview.adoc @@ -6,6 +6,36 @@ [id="fips-overview"] = Federal Information Processing Standard (FIPS) readiness and compliance -The Federal Information Processing Standard (FIPS) developed by the National Institute of Standards and Technology (NIST) is regarded as the highly regarded for securing and encrypting sensitive data, notably in highly regulated areas such as banking, healthcare, and the public sector. {rhel} and {ocp} support the FIPS standard by providing a _FIPS mode_, in which the system only allows usage of specific FIPS-validated cryptographic modules like `openssl`. This ensures FIPS compliance. +The Federal Information Processing Standard (FIPS) developed by the National Institute of Standards and Technology (NIST) is highly regarded for securing and encrypting sensitive data, notably in highly regulated areas such as banking, healthcare, and the public sector. {rhel} and {ocp} support FIPS by providing a _FIPS mode_, in which the system only allows usage of specific FIPS-validated cryptographic modules like `openssl`. This ensures FIPS compliance. -{productname} supports running on FIPS-enabled RHEL and {ocp} environments from {productname} version 3.5.0. +[id="enabling-fips-compliance"] +== Enabling FIPS compliance + +Use the following procedure to enable FIPS compliance on your {productname} deployment. + +.Prerequisites + +* If you are running a standalone deployment of {productname}, your {rhel} deployment is version 8 or later and FIPS-enabled. 
+ +* If you are deploying {productname-ocp}, {ocp} is version 4.10 or later. + +* Your {productname} version is 3.5.0 or later. + +* If you are using the {productname-ocp} on an IBM Power or IBM Z cluster: +** {ocp} version 4.14 or later is required +** {productname} version 3.10 or later is required + +* You have administrative privileges for your {productname} deployment. + +.Procedure + +* In your {productname} `config.yaml` file, set the `FEATURE_FIPS` configuration field to `true`. For example: ++ +[source,yaml] +---- +--- +FEATURE_FIPS: true +--- +---- ++ +With `FEATURE_FIPS` set to `true`, {productname} runs using FIPS-compliant hash functions. \ No newline at end of file diff --git a/modules/first-user-api.adoc b/modules/first-user-api.adoc index 7174d368e..ff53b11de 100644 --- a/modules/first-user-api.adoc +++ b/modules/first-user-api.adoc @@ -1,38 +1,56 @@ :_content-type: PROCEDURE -[id="deploy-quay-api"] -= Using the API to deploy {productname} +[id="using-the-api-to-create-first-user"] += Using the API to create the first user -This section introduces using the API to deploy {productname}. +Use the following procedure to create the first user in your {productname} organization. -.Prerequisites +.Prerequisites * The config option `FEATURE_USER_INITIALIZE` must be set to `true`. -* No users can already exist in the database. - -For more information on pre-configuring your {productname} deployment, see the section xref:config-preconfigure-automation[Pre-configuring {productname} for automation] - -[id="using-the-api-to-create-first-user"] -== Using the API to create the first user +* No users can already exist in the database. -Use the following procedure to create the first user in your {productname} organization. +.Procedure [NOTE] ==== -This procedure requests an OAuth token by specifying `"access_token": true`. +This procedure requests an OAuth token by specifying `"access_token": true`. 
==== -* Using the `status.registryEndpoint` URL, invoke the `/api/v1/user/initialize` API, passing in the username, password and email address by entering the following command: +. Open your {productname} configuration file and update the following configuration fields: ++ +[source,yaml] +---- +FEATURE_USER_INITIALIZE: true +SUPER_USERS: + - quayadmin +---- + +. Stop the {productname} service by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop quay +---- + +. Start the {productname} service by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d -p 80:8080 -p 443:8443 --name=quay -v $QUAY/config:/conf/stack:Z -v $QUAY/storage:/datastorage:Z {productrepo}/{quayimage}:{productminv} +---- + +. Run the following `CURL` command to generate a new user with a username, password, email, and access token: + [source,terminal] ---- -$ curl -X POST -k https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/user/initialize --header 'Content-Type: application/json' --data '{ "username": "quayadmin", "password":"quaypass123", "email": "quayadmin@example.com", "access_token": true}' +$ curl -X POST -k http://quay-server.example.com/api/v1/user/initialize --header 'Content-Type: application/json' --data '{ "username": "quayadmin", "password":"quaypass12345", "email": "quayadmin@example.com", "access_token": true}' ---- + -If successful, the command returns an object with the username, email, and encrypted password. For example: +If successful, the command returns an object with the username, email, and encrypted password. 
For example: + [source,yaml] ---- -{"access_token":"6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED", "email":"quayadmin@example.com","encrypted_password":"1nZMLH57RIE5UGdL/yYpDOHLqiNCgimb6W9kfF8MjZ1xrfDpRyRs9NUnUuNuAitW","username":"quayadmin"} +{"access_token":"6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED", "email":"quayadmin@example.com","encrypted_password":"1nZMLH57RIE5UGdL/yYpDOHLqiNCgimb6W9kfF8MjZ1xrfDpRyRs9NUnUuNuAitW","username":"quayadmin"} # gitleaks:allow ---- + If a user already exists in the database, an error is returned: @@ -41,10 +59,24 @@ If a user already exists in the database, an error is returned: ---- {"message":"Cannot initialize user in a non-empty database"} ---- -+ -If your password is not at least eight characters or contains whitespace, an error is returned: ++ +If your password is not at least eight characters or contains whitespace, an error is returned: + [source,terminal] ---- {"message":"Failed to initialize user: Invalid password, password must be at least 8 characters and contain no whitespace."} +---- + +. Log in to your {productname} deployment by entering the following command: ++ +[source,terminal] +---- +$ sudo podman login -u quayadmin -p quaypass12345 http://quay-server.example.com --tls-verify=false +---- ++ +.Example output ++ +[source,terminal] +---- +Login Succeeded! ---- \ No newline at end of file diff --git a/modules/frequently-asked-questions.adoc b/modules/frequently-asked-questions.adoc new file mode 100644 index 000000000..cc73750da --- /dev/null +++ b/modules/frequently-asked-questions.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="frequently-asked-questions"] += Frequently asked questions + +The "Frequently Asked Questions" (FAQ) document for {productname} aims to address common inquiries and provide comprehensive answers regarding the functionality, features, and usage of the {productname} container registry platform. 
This document serves as a valuable resource for users, administrators, and developers seeking quick and accurate information about various aspects of {productname}. + +The FAQ covers a wide range of topics, including account management, repository operations, security features, integration with other systems, troubleshooting tips, and best practices for optimizing the usage of {productname}. \ No newline at end of file diff --git a/modules/garbage-collection.adoc b/modules/garbage-collection.adoc index 8c2774319..2a99d2f85 100644 --- a/modules/garbage-collection.adoc +++ b/modules/garbage-collection.adoc @@ -1,14 +1,13 @@ :_content-type: CONCEPT +[id="garbage-collection"] = {productname} garbage collection -[[garbage-collection]] - -== About {productname} garbage collection {productname} includes automatic and continuous image garbage collection. Garbage collection ensures efficient use of resources for active objects by removing objects that occupy sizeable amounts of disk space, such as dangling or untagged images, repositories, and blobs, including layers and manifests. Garbage collection performed by {productname} can reduce downtime in your organization's environment. +[id="garbage-collection-practice"] == {productname} garbage collection in practice -Currently, all garbage collection happens discreetly; there are no commands to manually run garbage collection. {productname} provides metrics that track the status of the different garbage collection workers. +Currently, all garbage collection happens discreetly, and there are no commands to manually run garbage collection. {productname} provides metrics that track the status of the different garbage collection workers. For namespace and repository garbage collection, the progress is tracked based on the size of their respective queues. Namespace and repository garbage collection workers require a global lock to work. As a result, and for performance reasons, only one worker runs at a time. 
@@ -33,15 +32,17 @@ For each type of garbage collection, {productname} provides metrics for the numb image:garbage-collection-metrics.png[Garbage collection metrics] +[id="measuring-storage-reclamation"] === Measuring storage reclamation {productname} does not have a way to track how much space is freed up by garbage collection. Currently, the best indicator of this is by checking how many blobs have been deleted in the provided metrics. [NOTE] ==== -The `UploadedBlob` table in the {productname} metrics tracks the various blobs that are associated with a repository. When a blob is uploaded, it will not be garbage collected before the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter. This is to avoid prematurely deleting blobs that are part of an ongoing push. For example, if garbage collection is set to run often, and a tag is deleted in the span of less than one hour, then it is possible that the associated blobs will not get cleaned up immediately. Instead, and assuming that the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter has passed, the associated blobs will be removed the next time garbage collection runs on that same repository. +The `UploadedBlob` table in the {productname} metrics tracks the various blobs that are associated with a repository. When a blob is uploaded, it will not be garbage collected before the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter. This is to avoid prematurely deleting blobs that are part of an ongoing push. For example, if garbage collection is set to run often, and a tag is deleted in the span of less than one hour, then it is possible that the associated blobs will not get cleaned up immediately. Instead, and assuming that the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter has passed, the associated blobs will be removed the next time garbage collection is triggered to run by another expired tag on the same repository. 
==== +[id="garbage-collection-configuration-fields"] == Garbage collection configuration fields The following configuration fields are available to customize what is garbage collected, and the frequency at which garbage collection occurs: @@ -62,6 +63,7 @@ The following configuration fields are available to customize what is garbage co |=== +[id="disabling-garbage-collection"] == Disabling garbage collection The garbage collection features for image tags, namespaces, and repositories are stored in the `config.yaml` file. These features default to `true`. @@ -74,12 +76,14 @@ In rare cases, you might want to disable garbage collection, for example, to con There is no command to manually run garbage collection. Instead, you would disable, and then re-enable, the garbage collection feature. ==== +[id="garbage-collection-quota-management"] == Garbage collection and quota management {productname} introduced quota management in 3.7. With quota management, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. As of {productname} 3.7, garbage collection reclaims memory that was allocated to images, repositories, and blobs after deletion. Because the garbage collection feature reclaims memory after deletion, there is a discrepancy between what is stored in an environment's disk space and what quota management is reporting as the total consumption. There is currently no workaround for this issue. +[id="garbage-collection-procedure"] == Garbage collection in practice Use the following procedure to check your {productname} logs to ensure that garbage collection is working. 
@@ -94,6 +98,7 @@ $ sudo podman logs ---- + Example output: ++ [source,terminal] ---- gcworker stdout | 2022-11-14 18:46:52,458 [63] [INFO] [apscheduler.executors.default] Job "GarbageCollectionWorker._garbage_collection_repos (trigger: interval[0:00:30], next run at: 2022-11-14 18:47:22 UTC)" executed successfully @@ -109,11 +114,13 @@ $ podman logs quay-app ---- + Example output: ++ [source,terminal] ---- gunicorn-web stdout | 2022-11-14 19:23:44,574 [233] [INFO] [gunicorn.access] 192.168.0.38 - - [14/Nov/2022:19:23:44 +0000] "DELETE /api/v1/repository/quayadmin/busybox/tag/test HTTP/1.0" 204 0 "http://quay-server.example.com/repository/quayadmin/busybox?tab=tags" "Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0" ---- +[id="garbage-collection-metrics"] == {productname} garbage collection metrics The following metrics show how many resources have been removed by garbage collection. These metrics show how many times the garbage collection workers have run and how many namespaces, repositories, and blobs were removed. @@ -166,4 +173,4 @@ quay_gc_storage_blobs_deleted_created{host="example-registry-quay-app-6df87f7b66 # TYPE quay_gc_storage_blobs_deleted_total counter quay_gc_storage_blobs_deleted_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 ... 
----- +---- \ No newline at end of file diff --git a/modules/geo-repl-sslerror.adoc b/modules/geo-repl-sslerror.adoc new file mode 100644 index 000000000..375d33c4d --- /dev/null +++ b/modules/geo-repl-sslerror.adoc @@ -0,0 +1,30 @@ +:_content-type: PROCEDURE +[id="geo-repl-sslerror"] += Geo-replication errors out with SSLError + +In some cases, using the `podman push` command might return the following error: + +[source,terminal] +---- +storagereplication stdout | 2021-12-16 14:56:29,602 [144] [ERROR] [__main__] Failed to copy path `sha256/9f/9f9b90db7acda0f3f43e720ac9d54a7e623078fc7af6cf0c1d055410986d3f10` of image storage 0a014260-01a3-4a54-8dd6-784de7bf4feb to location dr +toragereplication stdout | Traceback (most recent call last): +storagereplication stdout | File "/usr/local/lib/python3.8/site-packages/urllib3/util/ssl_.py", line 336, in ssl_wrap_socket +storagereplication stdout | context.load_verify_locations(ca_certs, ca_cert_dir) +storagereplication stdout | ssl.SSLError: [X509] PEM lib (_ssl.c:4265) +storagereplication stdout | During handling of the above exception, another exception occurred: +. +storagereplication stdout | File "/usr/local/lib/python3.8/site-packages/botocore/httpsession.py", line 338, in send +storagereplication stdout | raise SSLError(endpoint_url=request.url, error=e) +storagereplication stdout | botocore.exceptions.SSLError: SSL validation failed for https://s3-openshift-storage.apps.ocp1.rosbank.rus.socgen/quay-bucket-dr [X509] PEM lib (_ssl.c:4265) +storagereplication stdout | 2021-12-16 14:56:29,603 [144] [WARNING] [workers.queueworker] An error occurred processing request: {"namespace_user_id": 1, "storage_id": "0a014260-01a3-4a54-8dd6-784de7bf4feb"} +storagereplication stdout | 2021-12-16 14:56:29,603 [144] [WARNING] [workers.queueworker] Job exception: +---- + +`SSLError` usually occurs after multiple certificates signing the same thing are added to your {productname} deployment. 
This error is most commonly seen on regular pushes and LDAP connectivity, even when outside sources are used, for example, AWS storage buckets. + +As a workaround for this issue, remove certificates from the `extra_ca_certs` one by one until you find the duplicate. After each removal, restart the `Quay` pod to test whether the issue persists. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6612551[Quay georeplication errors out with SSLError]. \ No newline at end of file diff --git a/modules/geo-repl-troubleshooting-issues.adoc b/modules/geo-repl-troubleshooting-issues.adoc new file mode 100644 index 000000000..840707892 --- /dev/null +++ b/modules/geo-repl-troubleshooting-issues.adoc @@ -0,0 +1,82 @@ +:_content-type: PROCEDURE +[id="geo-repl-troubleshooting-issues"] += Troubleshooting geo-replication for {productname} + +Use the following sections to troubleshoot geo-replication for {productname}. + +//// +[id="check-geo-repl-config"] +== Checking the geo-replication configuration + +Use the following procedure to check your geo-replication configuration in your {productname} `config.yaml` file. + +[IMPORTANT] +==== +The same configuration must be used across all regions. +==== + +.Procedure + +. Check your geo-replication configuration. + +.. If you are using the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it quay-pod -- cat /conf/stack/config.yaml +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it quay-container cat /conf/stack/config.yaml +---- +//// + +[id="check-data-replication"] +== Checking data replication in backend buckets + +Use the following procedure to ensure that your data is properly replicated in all backend buckets. + +.Prerequisites + +* You have installed the `aws` CLI. + +.Procedure + +. 
Enter the following command to ensure that your data is replicated in all backend buckets: ++ +[source,terminal] +---- +$ aws --profile quay_prod_s3 --endpoint=http://10.0.x.x:port s3 ls ocp-quay --recursive --human-readable --summarize +---- ++ +.Example output ++ +[source,terminal] +---- +Total Objects: 17996 +Total Size: 514.4 GiB +---- + +[id="check-backend-storage-running"] +== Checking the status of your backend storage + +Use the following resources to check the status of your backend storage. + +* *Amazon Web Service Storage (AWS)*. Check the AWS S3 service health status on the link:https://health.aws.amazon.com/health/status[AWS Service Health Dashboard]. Validate your access to S3 by listing objects in a known bucket using the `aws` CLI or SDKs. + +* *Google Cloud Storage (GCS)*. Check the link:https://status.cloud.google.com/[Google Cloud Status Dashboard] for the status of the GCS service. Verify your access to GCS by listing objects in a known bucket using the Google Cloud SDK or GCS client libraries. + +* *NooBaa*. Check the NooBaa management console or administrative interface for any health or status indicators. Ensure that the NooBaa services and related components are running and accessible. Verify access to NooBaa by listing objects in a known bucket using the NooBaa CLI or SDK. + +* **{odf}**. Check the {ocp} Console or management interface for the status of the {odf} components. Verify the availability of {odf} S3 interface and services. Ensure that the {odf} services are running and accessible. Validate access to {odf} S3 by listing objects in a known bucket using the appropriate S3-compatible SDK or CLI. + +* **Ceph**. Check the status of Ceph services, including Ceph monitors, OSDs, and RGWs. Validate that the Ceph cluster is healthy and operational. Verify access to Ceph object storage by listing objects in a known bucket using the appropriate Ceph object storage API or CLI. + +* **Azure Blob Storage**. 
Check the link:https://azure.status.microsoft/en-us/status[Azure Status Dashboard] to see the health status of the Azure Blob Storage service. Validate your access to Azure Blob Storage by listing containers or objects using the Azure CLI or Azure SDKs. + +* **OpenStack Swift**. Check the link:https://www.ibm.com/docs/ro/cmwo/4.3.0.0?topic=services-checking-status[OpenStack Status] page to verify the status of the OpenStack Swift service. Ensure that the Swift services, like the proxy server, container servers, object servers, are running and accessible. Validate your access to Swift by listing containers or objects using the appropriate Swift CLI or SDK. + +After checking the status of your backend storage, ensure that all {productname} instances have access to all s3 storage backends. \ No newline at end of file diff --git a/modules/georepl-deploy-operator.adoc b/modules/georepl-deploy-operator.adoc index 6427131e4..be3add226 100644 --- a/modules/georepl-deploy-operator.adoc +++ b/modules/georepl-deploy-operator.adoc @@ -1,19 +1,27 @@ -[[georepl-deploy-operator]] -= Setting up geo-replication on Openshift +:_content-type: PROCEDURE +[id="georepl-deploy-operator"] += Setting up geo-replication on {ocp} +Use the following procedure to set up geo-replication on {ocp}. .Procedure -. Deploy Quay postgres instance: +. Deploy a postgres instance for {productname}. -.. Login to the database -.. Create a database for Quay +. Login to the database by entering the following command: ++ +[source,terminal] +---- +psql -U -h -p -d +---- + +. Create a database for {productname} named `quay`. For example: + [source,terminal] ---- CREATE DATABASE quay; ---- -.. Enable pg_trm extension inside the database +. Enable pg_trm extension inside the database + [source,terminal] ---- @@ -30,7 +38,7 @@ CREATE EXTENSION IF NOT EXISTS pg_trgm; ==== .. Deploy a VM for Redis -.. Make sure that it is accessible from the clusters where Quay is running +.. 
Verify that it is accessible from the clusters where {productname} is running .. Port 6379/TCP must be open .. Run Redis inside the instance + @@ -40,20 +48,22 @@ sudo dnf install -y podman podman run -d --name redis -p 6379:6379 redis ---- -. Create two object storage backends, one for each cluster -+ -Ideally one object storage bucket will be close to the 1st cluster (primary) while the other will run closer to the 2nd cluster (secondary). +. Create two object storage backends, one for each cluster. Ideally, one object storage bucket will be close to the first, or primary, cluster, and the other will run closer to the second, or secondary, cluster. -. Deploy the clusters with the same config bundle, using environment variable overrides to select the appropriate storage backend for an individual cluster +. Deploy the clusters with the same config bundle, using environment variable overrides to select the appropriate storage backend for an individual cluster. -. Configure a load balancer, to provide a single entry point to the clusters +. Configure a load balancer to provide a single entry point to the clusters. +[id="configuring-geo-repl"] +== Configuring geo-replication for the {productname} on {ocp} -== Configuration +Use the following procedure to configure geo-replication for the {productname-ocp}. -The `config.yaml` file is shared between clusters, and will contain the details for the common PostgreSQL, Redis and storage backends: +.Procedure -.config.yaml +. Create a `config.yaml` file that is shared between clusters. 
This `config.yaml` file contains the details for the common PostgreSQL, Redis and storage backends: ++ +.Geo-replication `config.yaml` file [source,yaml] ---- SERVER_HOSTNAME: <1> @@ -67,6 +77,7 @@ BUILDLOGS_REDIS: USER_EVENTS_REDIS: host: 10.19.0.2 port: 6379 +DATABASE_SECRET_KEY: 0ce4f796-c295-415b-bf9d-b315114704b8 DISTRIBUTED_STORAGE_CONFIG: usstorage: - GoogleCloudStorage @@ -89,23 +100,24 @@ DISTRIBUTED_STORAGE_PREFERENCE: FEATURE_STORAGE_REPLICATION: true ---- <1> A proper `SERVER_HOSTNAME` must be used for the route and must match the hostname of the global load balancer. -<2> To retrieve the configuration file for a Clair instance deployed using the OpenShift Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Retrieving the Clair config]. - -Create the `configBundleSecret`: +<2> To retrieve the configuration file for a Clair instance deployed using the {ocp} Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Retrieving the Clair config]. +. Create the `configBundleSecret` by entering the following command: ++ [source,terminal] ---- $ oc create secret generic --from-file config.yaml=./config.yaml georep-config-bundle ---- -In each of the clusters, set the `configBundleSecret` and use the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environmental variable override to configure the appropriate storage for that cluster: - +. In each of the clusters, set the `configBundleSecret` and use the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environmental variable override to configure the appropriate storage for that cluster. For example: ++ [NOTE] ==== The `config.yaml` file between both deployments must match. If making a change to one cluster, it must also be changed in the other. 
==== - -.US cluster ++ +[source,yaml] +.US cluster `QuayRegistry` example ---- apiVersion: quay.redhat.com/v1 kind: QuayRegistry @@ -143,9 +155,10 @@ spec: + [NOTE] ==== -Because TLS is unmanaged, and the route is managed, you must supply the certificates with either with the config tool or directly in the config bundle. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes]. +Because SSL/TLS is unmanaged, and the route is managed, you must supply the certificates directly in the config bundle. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes]. ==== - ++ +[source,yaml] .European cluster ---- apiVersion: quay.redhat.com/v1 @@ -184,5 +197,5 @@ spec: + [NOTE] ==== -Because TLS is unmanaged, and the route is managed, you must supply the certificates with either with the config tool or directly in the config bundle. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes]. -==== +Because SSL/TLS is unmanaged, and the route is managed, you must supply the certificates directly in the config bundle. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes]. 
+==== \ No newline at end of file diff --git a/modules/georepl-intro.adoc b/modules/georepl-intro.adoc index fc86e8a32..0dafd7a04 100644 --- a/modules/georepl-intro.adoc +++ b/modules/georepl-intro.adoc @@ -1,20 +1,7 @@ :_content-type: CONCEPT -[id="arch-georepl-intro"] +[id="georepl-intro"] = Geo-replication Geo-replication allows multiple, geographically distributed {productname} deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed {productname} setup. Image data is asynchronously replicated in the background with transparent failover and redirect for clients. -With {productname} 3.7, deployments of {productname} with geo-replication is supported on standalone and Operator deployments. - -[id="arch-georpl-features"] -== Geo-replication features - -* When geo-replication is configured, container image pushes will be written to the preferred storage engine for that {productname} instance. This is typically the nearest storage backend within the region. - -* After the initial push, image data will be replicated in the background to other storage engines. - -* The list of replication locations is configurable and those can be different storage backends. - -* An image pull will always use the closest available storage engine, to maximize pull performance. - -* If replication has not been completed yet, the pull will use the source storage backend instead. \ No newline at end of file +Deployments of {productname} with geo-replication are supported on standalone and Operator deployments. 
\ No newline at end of file diff --git a/modules/georepl-mixed-storage.adoc b/modules/georepl-mixed-storage.adoc index 4bc972898..fa93a33cd 100644 --- a/modules/georepl-mixed-storage.adoc +++ b/modules/georepl-mixed-storage.adoc @@ -7,6 +7,6 @@ * A VPN to prevent visibility of the internal storage, _or_ * A token pair that only allows access to the specified bucket used by {productname} -This will result in the public cloud instance of {productname} having access to on premise storage, but the network will be encrypted, protected, and will use ACLs, thereby meeting security requirements. +This results in the public cloud instance of {productname} having access to on-premise storage, but the network will be encrypted, protected, and will use ACLs, thereby meeting security requirements. -If you cannot implement these security measures, it may be preferable to deploy two distinct {productname} registries and to use repository mirroring as an alternative to geo-replication. \ No newline at end of file +If you cannot implement these security measures, it might be preferable to deploy two distinct {productname} registries and to use repository mirroring as an alternative to geo-replication. \ No newline at end of file diff --git a/modules/georepl-prereqs.adoc b/modules/georepl-prereqs.adoc index efc9bf622..b0907610c 100644 --- a/modules/georepl-prereqs.adoc +++ b/modules/georepl-prereqs.adoc @@ -6,7 +6,11 @@ * In case of an object storage system failure of one geo-replicating site, that site's {productname} deployment must be shut down so that clients are redirected to the remaining site with intact storage systems by a global load balancer. Otherwise, clients will experience pull and push failures. -* {productname} has no internal awareness of the health or availability of the connected object storage system. 
If the object storage system of one site becomes unavailable, there will be no automatic redirect to the remaining storage system, or systems, of the remaining site, or sites. +* {productname} has no internal awareness of the health or availability of the connected object storage system. Users must configure a global load balancer (LB) to monitor the health of your distributed system and to route traffic to different sites based on their storage status. + +* To check the status of your geo-replication deployment, you must use the `/health/endtoend` checkpoint, which is used for global health monitoring. You must configure the redirect manually using the `/health/endtoend` endpoint. The `/health/instance` end point only checks local instance health. + +* If the object storage system of one site becomes unavailable, there will be no automatic redirect to the remaining storage system, or systems, of the remaining site, or sites. * Geo-replication is asynchronous. The permanent loss of a site incurs the loss of the data that has been saved in that sites' object storage system but has not yet been replicated to the remaining sites at the time of failure. @@ -14,7 +18,7 @@ + Geo-replication does not replicate the database. In the event of an outage, {productname} with geo-replication enabled will not failover to another database. -* A single Redis cache is shared across the entire {productname} setup and needs to accessible by all {productname} pods. +* A single Redis cache is shared across the entire {productname} setup and needs to be accessible by all {productname} pods. * The exact same configuration should be used across all regions, with exception of the storage backend, which can be configured explicitly using the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable. @@ -24,7 +28,7 @@ Geo-replication does not replicate the database. In the event of an outage, {pro * Alternatively, the storage proxy option can be used. 
-* The entire storage backend, for example, all blobs, is replicated. Repository mirroring, by contrast, can be limited to an organization, repository, or image. +* The entire storage backend, for example, all blobs, is replicated. Repository mirroring, by contrast, can be limited to a repository, or an image. * All {productname} instances must share the same entrypoint, typically through a load balancer. @@ -32,6 +36,7 @@ Geo-replication does not replicate the database. In the event of an outage, {productname} with geo-replication enabled will not failover to another database. * Geo-replication requires your Clair configuration to be set to `unmanaged`. An unmanaged Clair database allows the {productname} Operator to work in a geo-replicated environment, where multiple instances of the {productname} Operator must communicate with the same database. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#clair-unmanaged[Advanced Clair configuration]. -* Geo-Replication requires SSL/TSL certificates and keys. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/index#using_ssl_to_protect_connections_to_red_hat_quay[Using SSL/TSL to protect connections to {productname}]. +* Geo-Replication requires SSL/TLS certificates and keys. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/proof_of_concept_-_deploying_red_hat_quay/advanced-quay-poc-deployment[Proof of concept deployment using SSL/TLS certificates]. +* If the above requirements cannot be met, you should instead use two or more distinct {productname} deployments and take advantage of repository mirroring functions. 
\ No newline at end of file diff --git a/modules/getting-support.adoc b/modules/getting-support.adoc new file mode 100644 index 000000000..8963d5c9f --- /dev/null +++ b/modules/getting-support.adoc @@ -0,0 +1,74 @@ +:_content-type: CONCEPT +[id="getting-support"] += Getting support + +If you experience difficulty with a procedure described in this documentation, or with {productname} in general, visit the link:http://access.redhat.com[Red Hat Customer Portal]. From the Customer Portal, you can: + +* Search or browse through the Red Hat Knowledgebase of articles and solutions relating to Red Hat products. +* Submit a support case to Red Hat Support. +* Access other product documentation. + +To identify issues with your deployment, you can use the {productname} debugging tool, or check the health endpoint of your deployment to obtain information about your problem. After you have debugged or obtained health information about your deployment, you can search the Red Hat Knowledgebase for a solution or file a support ticket. + +If you have a suggestion for improving this documentation or have found an +error, submit a link:https://issues.redhat.com/secure/CreateIssue!default.jspa[Jira issue] to the `ProjectQuay` project. Provide specific details, such as the section name and {productname} version. + +[id="support-knowledgebase-about"] +== About the Red Hat Knowledgebase + +The link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] provides rich content aimed at helping you make the most of Red Hat's products and technologies. The Red Hat Knowledgebase consists of articles, product documentation, and videos outlining best practices on installing, configuring, and using Red Hat products. In addition, you can search for solutions to known issues, each providing concise root cause descriptions and remedial steps. 
+ +The {productname} Support Team also maintains a link:https://access.redhat.com/articles/6975387[Consolidate troubleshooting article for {productname}] that details solutions to common problems. This is an evolving document that can help users navigate various issues effectively and efficiently. + +[id="support-knowledgebase-search"] +== Searching the Red Hat Knowledgebase + +In the event of an {productname} issue, you can perform an initial search to determine if a solution already exists within the Red Hat Knowledgebase. + +.Prerequisites + +* You have a Red Hat Customer Portal account. + +.Procedure + +. Log in to the link:http://access.redhat.com[Red Hat Customer Portal]. + +. In the main Red Hat Customer Portal search field, input keywords and strings relating to the problem, including: ++ +* {productname} components (such as *database*) +* Related procedure (such as *installation*) +* Warnings, error messages, and other outputs related to explicit failures + +. Click *Search*. + +. Select the *{productname}* product filter. + +. Select the *Knowledgebase* content type filter. + +[id="support-submitting-a-case"] +== Submitting a support case + +.Prerequisites + +* You have a Red Hat Customer Portal account. +* You have a Red Hat standard or premium Subscription. + +.Procedure + +. Log in to the link:http://access.redhat.com[Red Hat Customer Portal] and select *Open a support case*. + +. Select the *Troubleshoot* tab. + +. For *Summary*, enter a concise but descriptive problem summary and further details about the symptoms being experienced, as well as your expectations. + +. Review the list of suggested Red Hat Knowledgebase solutions for a potential match against the problem that is being reported. If the suggested articles do not address the issue, continue to the following step. + +. For *Product*, select *Red Hat Quay*. + +. Select the version of {productname} that you are using. + +. Click *Continue*. + +. Optional. 
Drag and drop, paste, or browse to upload a file. This could be debug logs gathered from your {productname} deployment. + +. Click *Get support* to file your ticket. \ No newline at end of file diff --git a/modules/health-check-quay.adoc b/modules/health-check-quay.adoc new file mode 100644 index 000000000..b93814bec --- /dev/null +++ b/modules/health-check-quay.adoc @@ -0,0 +1,47 @@ +:_content-type: CONCEPT +[id="health-check-quay"] += Performing health checks on {productname} deployments + +Health check mechanisms are designed to assess the health and functionality of a system, service, or component. Health checks help ensure that everything is working correctly, and can be used to identify potential issues before they become critical problems. By monitoring the health of a system, {productname} administrators can address abnormalities or potential failures for things like geo-replication deployments, Operator deployments, standalone {productname} deployments, object storage issues, and so on. Performing health checks can also help reduce the likelihood of encountering troubleshooting scenarios. + +Health check mechanisms can play a role in diagnosing issues by providing valuable information about the system's current state. By comparing health check results with expected benchmarks or predefined thresholds, deviations or anomalies can be identified quicker. + +[id="health-check-endpoints"] +== {productname} health check endpoints + +[IMPORTANT] +==== +Links contained herein to any external website(s) are provided for convenience only. Red Hat has not reviewed the links and is not responsible for the content or its availability. The inclusion of any link to an external website does not imply endorsement by Red Hat of the website or its entities, products, or services. You agree that Red Hat is not responsible or liable for any loss or expenses that may result due to your use of (or reliance on) the external site or content. 
+==== + +{productname} has several health check endpoints. The following table shows you the health check, a description, an endpoint, and an example output. + +.Health check endpoints +[cols="1a,3a,2a,2a",options="header"] +|=== +|Health check |Description |Endpoint |Example output +|`instance` | The `instance` endpoint acquires the entire status of the specific {productname} instance. Returns a `dict` with key-value pairs for the following: `auth`, `database`, `disk_space`, `registry_gunicorn`, `service_key`, and `web_gunicorn.` Returns a number indicating the health check response of either `200`, which indicates that the instance is healthy, or `503`, which indicates an issue with your deployment. |`https://{quay-ip-endpoint}/health/instance` _or_ `https://{quay-ip-endpoint}/health` | `{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200}` + +|`endtoend` |The `endtoend` endpoint conducts checks on all services of your {productname} instance. Returns a `dict` with key-value pairs for the following: `auth`, `database`, `redis`, `storage`. Returns a number indicating the health check response of either `200`, which indicates that the instance is healthy, or `503`, which indicates an issue with your deployment. |`https://{quay-ip-endpoint}/health/endtoend` | `{"data":{"services":{"auth":true,"database":true,"redis":true,"storage":true}},"status_code":200}` + +|`warning` |The `warning` endpoint conducts a check on the warnings. Returns a `dict` with key-value pairs for the following: `disk_space_warning`. Returns a number indicating the health check response of either `200`, which indicates that the instance is healthy, or `503`, which indicates an issue with your deployment. 
+|`https://{quay-ip-endpoint}/health/warning` | `{"data":{"services":{"disk_space_warning":true}},"status_code":503}` +|=== + +[id="instance-endpoint-quay"] +== Navigating to a {productname} health check endpoint + +Use the following procedure to navigate to the `instance` endpoint. This procedure can be repeated for `endtoend` and `warning` endpoints. + +.Procedure + +. On your web browser, navigate to `https://{quay-ip-endpoint}/health/instance`. + +. You are taken to the health instance page, which returns information like the following: ++ +[source,json] +---- +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- ++ +For {productname}, `"status_code": 200` means that the instance is healthy. Conversely, if you receive `"status_code": 503`, there is an issue with your deployment. \ No newline at end of file diff --git a/modules/helm-oci-prereqs.adoc b/modules/helm-oci-prereqs.adoc index a82c52692..964b8bc8b 100644 --- a/modules/helm-oci-prereqs.adoc +++ b/modules/helm-oci-prereqs.adoc @@ -1,20 +1,106 @@ -[[helm-oci-prereqs]] +:_content-type: CONCEPT +[id="helm-oci-prereqs"] = Helm and OCI prerequisites -* **Trusted certificates:** Communication between the Helm client and Quay is facilitated over HTTPS and as of Helm 3.5, support is only available for registries communicating over HTTPS with trusted certificates. In addition, the operating system must trust the certificates exposed by the registry. Support in future Helm releases will allow for communicating with remote registries insecurely. With that in mind, ensure that your operating system has been configured to trust the certificates used by Quay, for example: +Helm simplifies how applications are packaged and deployed. Helm uses a packaging format called _Charts_ which contain the Kubernetes resources representing an application. 
+ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +supports Helm charts so long as they are a version supported by OCI. + +Use the following procedures to pre-configure your system to use Helm and other OCI media types. + +The most recent version of Helm can be downloaded from the link:https://github.com/helm/helm/releases[Helm releases] page. +ifeval::["{context}" == "use-quay"] +After you have downloaded Helm, you must enable your system to trust SSL/TLS certificates used by {productname}. + +//// +[id="installing-helm"] +== Installing Helm + +Use the following procedure to install the Helm client. + +.Procedure + +. Download the latest version of Helm from the link:https://github.com/helm/helm/releases[Helm releases] page. + +. Enter the following command to unpack the Helm binary: + +[source,terminal] ---- -$ sudo cp rootCA.pem /etc/pki/ca-trust/source/anchors/ -$ sudo update-ca-trust extract +$ tar -zxvf helm-v3.8.2-linux-amd64.tar.gz ---- -* **Generally available:** As of Helm 3.8, OCI registry support for charts is now generally available. - -* **Install Helm client:** Download your desired version from the link:https://github.com/helm/helm/releases[Helm releases] page. Unpack it and move the helm binary to its desired destination: +. Move the Helm binary to the desired location: + +[source,terminal] ---- -$ tar -zxvf helm-v3.8.2-linux-amd64.tar.gz $ mv linux-amd64/helm /usr/local/bin/helm ---- -* **Create organization in Quay:** Create a new organization for storing the Helm charts, using the Quay registry UI. For example, create an organization named `helm`. +For more information about installing Helm, see the link:https://helm.sh/docs/intro/install/[Installing Helm] documentation. + +[id="upgrading-helm-38"] +== Upgrading to Helm 3.8 + +Support for OCI registry charts requires that Helm has been upgraded to at least 3.8. 
If you have already downloaded Helm and need to upgrade to Helm 3.8, see the link:https://helm.sh/docs/helm/helm_upgrade/[Helm Upgrade] documentation. +//// +[id="enabling-system-trust-ssl-tls-certs"] +== Enabling your system to trust SSL/TLS certificates used by {productname} + +Communication between the Helm client and {productname} is facilitated over HTTPS. As of Helm 3.5, support is only available for registries communicating over HTTPS with trusted certificates. In addition, the operating system must trust the certificates exposed by the registry. You must ensure that your operating system has been configured to trust the certificates used by {productname}. Use the following procedure to enable your system to trust the custom certificates. + +.Procedure + +. Enter the following command to copy the `rootCA.pem` file to the `/etc/pki/ca-trust/source/anchors/` folder: ++ +[source,terminal] +---- +$ sudo cp rootCA.pem /etc/pki/ca-trust/source/anchors/ +---- + +. Enter the following command to update the CA trust store: ++ +[source,terminal] +---- +$ sudo update-ca-trust extract +---- +endif::[] + +//// + +[id="creating-organization-helm"] +== Creating an organization for Helm + +It is recommended that you create a new organization for storing Helm charts in +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +after you have downloaded the Helm client. Use the following procedure to create a new organization using the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. + +.Procedure + +ifeval::["{context}" == "quay-io"] +. Log in to your {quayio} deployment. +endif::[] +ifeval::["{context}" == "use-quay"] +. Log in to your {productname} deployment. +endif::[] + +. Click *Create New Organization*. + +. Enter a name for the organization, for example, *helm*. Then, click *Create Organization*. 
+//// \ No newline at end of file diff --git a/modules/helm-oci-quay.adoc b/modules/helm-oci-quay.adoc index 6eebb5fb0..05eb6edf3 100644 --- a/modules/helm-oci-quay.adoc +++ b/modules/helm-oci-quay.adoc @@ -1,36 +1,52 @@ -[[helm-oci-quay]] -= Helm charts with {productname} +:_content-type: PROCEDURE +[id="using-helm-charts"] += Using Helm charts -Helm, as a graduated project of the Cloud Native Computing Foundation (CNCF), has become the de facto package manager for Kubernetes as it simplifies how applications are packaged and deployed. Helm uses a packaging format called Charts which contain the Kubernetes resources representing an application. Charts can be made available for general distribution and consumption in repositories. A Helm repository is an HTTP server that serves an `index.yaml` metadata file and optionally a set of packaged charts. Beginning with Helm version 3, support was made available for distributing charts in OCI registries as an alternative to a traditional repository. +Use the following example to download and push an etherpad chart from the Red Hat Community of Practice (CoP) repository. -== Using Helm charts with {productname} +.Prerequisites -Use the following example to download and push an etherpad chart from the Red Hat Community of Practice (CoP) repository. +* You have logged into +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] .Procedure - -. Add a chart repository: +//// +ifeval::["{context}" == "use-quay"] +. As a {productname} administrator, enable support for Helm by setting `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +---- +endif::[] +//// +. Add a chart repository by entering the following command: + [source,terminal] ---- $ helm repo add redhat-cop https://redhat-cop.github.io/helm-charts ---- -. 
Update the information of available charts locally from the chart repository: +. Enter the following command to update the information of available charts locally from the chart repository: + [source,terminal] ---- $ helm repo update ---- -. Download a chart from a repository: +. Enter the following command to pull a chart from a repository: + [source,terminal] ---- $ helm pull redhat-cop/etherpad --version=0.0.4 --untar ---- -. Package the chart into a chart archive: +. Enter the following command to package the chart into a chart archive: + [source,terminal] ---- @@ -44,19 +60,38 @@ Example output Successfully packaged chart and saved it to: /home/user/linux-amd64/etherpad-0.0.4.tgz ---- -. Log in to your Quay repository using `helm registry login`: +ifeval::["{context}" == "quay-io"] +. Log in to {quayio} using `helm registry login`: ++ +[source,terminal] +---- +$ helm registry login quay.io +---- +endif::[] +ifeval::["{context}" == "use-quay"] +. Log in to {productname} using `helm registry login`: + [source,terminal] ---- $ helm registry login quay370.apps.quayperf370.perfscale.devcluster.openshift.com ---- +endif::[] -. Push the chart to your Quay repository using the `helm push` command: +. Push the chart to your repository using the `helm push` command: +ifeval::["{context}" == "quay-io"] ++ +[source,terminal] +---- +helm push etherpad-0.0.4.tgz oci://quay.io//helm +---- +endif::[] +ifeval::["{context}" == "use-quay"] + [source,terminal] ---- $ helm push etherpad-0.0.4.tgz oci://quay370.apps.quayperf370.perfscale.devcluster.openshift.com ---- +endif::[] + Example output: + @@ -64,7 +99,6 @@ Example output: ---- Pushed: quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad:0.0.4 Digest: sha256:a6667ff2a0e2bd7aa4813db9ac854b5124ff1c458d170b70c2d2375325f2451b - ---- . 
Ensure that the push worked by deleting the local copy, and then pulling the chart from the repository: @@ -74,10 +108,18 @@ Digest: sha256:a6667ff2a0e2bd7aa4813db9ac854b5124ff1c458d170b70c2d2375325f2451b $ rm -rf etherpad-0.0.4.tgz ---- + +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ helm pull oci://quay.io//helm/etherpad --version 0.0.4 +---- +endif::[] +ifeval::["{context}" == "use-quay"] [source,terminal] ---- $ helm pull oci://quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad --version 0.0.4 ---- +endif::[] + Example output: + diff --git a/modules/how-to-list-quay-repos.adoc b/modules/how-to-list-quay-repos.adoc new file mode 100644 index 000000000..b271bd331 --- /dev/null +++ b/modules/how-to-list-quay-repos.adoc @@ -0,0 +1,91 @@ +:_content-type: CONCEPT +[id="how-to-list-quay-repos"] += Listing more than 100 {productname} repositories using next_page + +When using the `curl` command to list {productname} repositories, you might encounter a limitation where only the first 100 repositories are displayed. + +To overcome the limitation and retrieve more than 100 repositories, the `next_page` token needs to be utilized. The `next_page` token allows you to fetch the next set of repositories. + +Use the following procedure to list more than 100 repositories. + +.Procedure + +. Enter the following `curl` command to retrieve the first 100 records along with the `next_page` token: ++ +[source,terminal] +---- +# curl -X GET -H "Authorization: Bearer ${TOKEN}" "https://${URL}/api/v1/repository?namespace=${ORGANIZATION}" | jq '.' 
| head -20 +---- ++ +.Example output ++ +[source,terminal] +---- +100 15092 100 15092 0 0 49320 0 --:--:-- --:--:-- --:--:-- 49159 +{ + "repositories": [ + { + "namespace": "redhat", + "name": "repo1", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, + { + "namespace": "redhat", + "name": "repo2", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, +... +gAAAAABhC5BunFXqUYIni1MZ_eXO8NL_TQEVzPEwpcUlnTMChM0YaNBiZwApkIllW5hpg8ARSBsuFg== <---- next_page token +---- + +. Use the `next_page` token obtained from the previous command to list more than 100 repositories. For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer Dz7hPQ..." "http://quay.example.com/api/v1/repository?namespace=redhat&next_page=gAAAAABhC7A...SBsuFg== " | jq '.' | head -20 +---- ++ +.Example output ++ +[source,terminal] +---- +% Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 15200 100 15200 0 0 51351 0 --:--:-- --:--:-- --:--:-- 51351 + + "repositories": [ + { + "namespace": "redhat", + "name": "repo101", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, + { + "namespace": "redhat", + "name": "repo102", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, ] +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6234121[How to list more than 100 Quay repositories using next_page token]. 
+ + + diff --git a/modules/image-tags-overview.adoc b/modules/image-tags-overview.adoc new file mode 100644 index 000000000..126cfa1a5 --- /dev/null +++ b/modules/image-tags-overview.adoc @@ -0,0 +1,25 @@ +:_content-type: CONCEPT +[id="image-tags-overview"] += Image tags overview + +An _image tag_ refers to a label or identifier assigned to a specific version or variant of a container image. Container images are typically composed of multiple layers that represent different parts of the image. Image tags are used to differentiate between different versions of an image or to provide additional information about the image. + +Image tags have the following benefits: + +* *Versioning and Releases*: Image tags allow you to denote different versions or releases of an application or software. For example, you might have an image tagged as _v1.0_ to represent the initial release and _v1.1_ for an updated version. This helps in maintaining a clear record of image versions. + +* *Rollbacks and Testing*: If you encounter issues with a new image version, you can easily revert to a previous version by specifying its tag. This is helpful during debugging and testing phases. + +* *Development Environments*: Image tags are beneficial when working with different environments. You might use a _dev_ tag for a development version, _qa_ for quality assurance testing, and _prod_ for production, each with their respective features and configurations. + +* *Continuous Integration/Continuous Deployment (CI/CD)*: CI/CD pipelines often utilize image tags to automate the deployment process. New code changes can trigger the creation of a new image with a specific tag, enabling seamless updates. + +* *Feature Branches*: When multiple developers are working on different features or bug fixes, they can create distinct image tags for their changes. This helps in isolating and testing individual features. 
+ +* *Customization*: You can use image tags to customize images with different configurations, dependencies, or optimizations, while keeping track of each variant. + +* *Security and Patching*: When security vulnerabilities are discovered, you can create patched versions of images with updated tags, ensuring that your systems are using the latest secure versions. + +* *Dockerfile Changes*: If you modify the Dockerfile or build process, you can use image tags to differentiate between images built from the previous and updated Dockerfiles. + +Overall, image tags provide a structured way to manage and organize container images, enabling efficient development, deployment, and maintenance workflows. \ No newline at end of file diff --git a/modules/java-image-scan-not-working.adoc b/modules/java-image-scan-not-working.adoc new file mode 100644 index 000000000..08c26dbdc --- /dev/null +++ b/modules/java-image-scan-not-working.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="java-image-scan-not-working"] += Java image scanning not work with disconnected Clair + +In {productname} 3.8, Java image scanning does not work in a disconnected Clair environment. It requires an active connection to the internet. + +To resolve this issue, you must disable Java scanning from Clair, or connect to the internet. + +Use the following procedure to disable Java scanning from Clair. + +.Procedure + +. 
Check which SHA ID errors out by reaching out to the Maven indexer, for example: ++ +[source,terminal] +---- +{"level":"warn","file":"spring-web/lib/jcip-annotations-1.0.jar","layer":"sha256:7072d1ca8cd39f2ae4fd36d5a5272e4564a06c92441bdf29185c312ff87432ee","component":"java/Scanner.Scan","version":"3","scanner":"java","manifest":"sha256:d2eed634032c3827bd36f8aae86ef6113d9f4763fbeb6ad041b1f2a3962b6b24","state":"ScanLayers","kind":"package","error":"Get \"https://search.maven.org/solrsearch/select?q=1%3A%22afba4942caaeaf46aab0b976afd57cc7c181467e%22&wt=json\": dial tcp 52.1.120.204:443: i/o timeout","time":"2023-02-08T10:46:59Z","message":"error making request"} +---- + +. Run the following command to reveal which image this SHA ID belongs to: ++ +[source,terminal] +---- +quay=# SELECT t1.username AS namespace, t2.name AS repo_name, t4.content_checksum AS sha_digest FROM "user" AS t1 INNER JOIN repository AS t2 ON t1.id = t2.namespace_user_id INNER JOIN manifestblob AS t3 on t2.id = t3.repository_id INNER JOIN imagestorage AS t4 ON t3.blob_id = t4.id WHERE t4.content_checksum = 'sha256:0cea90e4778f9241c20421d8c97a8d182fd0fa51e6c84210dc4b57522fc901b8'; +---- ++ +.Example output ++ +[source,terminal] +---- +namespace | repo_name | sha_digest +-----------+-----------+------------------------------------------------------------------------- +redhat | quay | sha256:0cea90e4778f9241c20421d8c97a8d182fd0fa51e6c84210dc4b57522fc901b8 +---- + +. Run the following command to find the base operating system of the image, assuming it is Java-based: ++ +[source,terminal] +---- +$ podman run image:tag /bin/bash -c "cat /etc/*release" +---- + +. There are no documented steps to stop the Maven indexer. 
Run the following command in a development or test cluster first, setting the API request to a page that returns a `404` so that it fails quickly: ++ +[source,yaml] +---- +scanner: + package: + java: + api: https://quay.github.io/clair404 +---- ++ +Replace the API with a known page that returns error `404`. This should fail the Maven indexer and turn it off for that image. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/7003383[In disconnected Clair, java image scanning is not working]. \ No newline at end of file diff --git a/modules/keyless-authentication-robot-accounts.adoc b/modules/keyless-authentication-robot-accounts.adoc new file mode 100644 index 000000000..31ef6fc47 --- /dev/null +++ b/modules/keyless-authentication-robot-accounts.adoc @@ -0,0 +1,274 @@ +:_content-type: PROCEDURE +[id="keyless-authentication-robot-accounts"] += Keyless authentication with robot accounts + +In previous versions of {productname}, robot account tokens were valid for the lifetime of the token unless deleted or regenerated. Tokens that do not expire have security implications for users who do not want to store long-term passwords or manage the deletion, or regeneration, or new authentication tokens. + +With {productname} {producty}, {productname} administrators are provided the ability to exchange external OIDC tokens for short-lived, or _ephemeral_ robot account tokens with either Red Hat Single Sign-On (based on the Keycloak project) or Microsoft Entra ID. This allows robot accounts to leverage tokens that last one hour, which are are refreshed regularly and can be used to authenticate individual transactions. + +This feature greatly enhances the security of your {productname} registry by mitigating the possibility of robot token exposure by removing the tokens after one hour. 
+ +Configuring keyless authentication with robot accounts is a multi-step procedure that requires setting a robot federation, generating an OAuth2 token from your OIDC provider, and exchanging the OAuth2 token for a robot account access token. + +[id="generating-oauth2-token-using-keycloak"] +== Generating an OAuth2 token with Red Hat Sign Sign-On + +The following procedure shows you how to generate an OAuth2 token using Red Hat Single Sign-On. Depending on your OIDC provider, these steps will vary. + +.Procedure + +. On the Red Hat Single Sign-On UI: + +.. Click *Clients* and then the name of the application or service that can request authentication of a user. + +.. On the *Settings* page of your client, ensure that the following options are set or enabled: ++ +* *Client ID* +* *Valid redirect URI* +* *Client authentication* +* *Authorization* +* *Standard flow* +* *Direct access grants* ++ +[NOTE] +==== +Settings can differ depending on your setup. +==== + +.. On the *Credentials* page, store the *Client Secret* for future use. + +.. On the *Users* page, click *Add user* and enter a username, for example, `service-account-quaydev`. Then, click *Create*. + +.. Click the name of of the user, for example *service-account-quaydev* on the *Users* page. + +.. Click the *Credentials* tab -> *Set password* -> and provide a password for the user. If warranted, you can make this password temporary by selecting the *Temporary* option. + +.. Click the *Realm settings* tab -> *OpenID Endpoint Configuration*. Store the `/protocol/openid-connect/token` endpoint. For example: ++ +[source,text] +---- +http://localhost:8080/realms/master/protocol/openid-connect/token +---- + +. On a web browser, navigate to the following URL: ++ +[source,text] +---- +http:///realms//protocol/openid-connect/auth?response_type=code&client_id= +---- + +. When prompted, log in with the *service-account-quaydev* user and the temporary password you set. 
Complete the login by providing the required information and setting a permanent password if necessary. + +. You are redirected to the URI address provided for your client. For example: ++ +[source,text] +---- +https://localhost:3000/cb?session_state=5c9bce22-6b85-4654-b716-e9bbb3e755bc&iss=http%3A%2F%2Flocalhost%3A8080%2Frealms%2Fmaster&code=ea5b76eb-47a5-4e5d-8f71-0892178250db.5c9bce22-6b85-4654-b716-e9bbb3e755bc.cdffafbc-20fb-42b9-b254-866017057f43 +---- ++ +Take note of the `code` provided in the address. For example: ++ +[source,text] +---- +code=ea5b76eb-47a5-4e5d-8f71-0892178250db.5c9bce22-6b85-4654-b716-e9bbb3e755bc.cdffafbc-20fb-42b9-b254-866017057f43 +---- ++ +[NOTE] +==== +This is a temporary code that can only be used one time. If necessary, you can refresh the page or revisit the URL to obtain another code. +==== + +. On your terminal, use the following `curl -X POST` command to generate a temporary OAuth2 access token: ++ +[source,terminal] +---- +$ curl -X POST "http://localhost:8080/realms/master/protocol/openid-connect/token" <1> +-H "Content-Type: application/x-www-form-urlencoded" \ +-d "client_id=quaydev" <2> +-d "client_secret=g8gPsBLxVrLo2PjmZkYBdKvcB9C7fmBz" <3> +-d "grant_type=authorization_code" +-d "code=ea5b76eb-47a5-4e5d-8f71-0892178250db.5c9bce22-6b85-4654-b716-e9bbb3e755bc.cdffafbc-20fb-42b9-b254-866017057f43" <4> +---- +<1> The `protocol/openid-connect/token` endpoint found on the *Realm settings* page of the Red Hat Single Sign-On UI. +<2> The Client ID used for this procedure. +<3> The Client Secret for the Client ID. +<4> The code returned from the redirect URI. 
++ +.Example output ++ +[source,terminal] +---- +{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJTVmExVHZ6eDd2cHVmc1dkZmc1SHdua1ZDcVlOM01DN1N5T016R0QwVGhVIn0...", +"expires_in":60,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzUxMiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJiNTBlZTVkMS05OTc1LTQwMzUtYjNkNy1lMWQ5ZTJmMjg0MTEifQ.oBDx6B3pUkXQO8m-M3hYE7v-w25ak6y70CQd5J8f5EuldhvTwpWrC1K7yOglvs09dQxtq8ont12rKIoCIi4WXw","token_type":"Bearer","not-before-policy":0,"session_state":"5c9bce22-6b85-4654-b716-e9bbb3e755bc","scope":"profile email"} +---- + +. Store the `access_token` from the previously step, as it will be exchanged for a {productname} robot account token in the following procedure. + +[id="setting-robot-federation"] +== Setting up a robot account federation by using the {productname} v2 UI + +The following procedure shows you how to set up a robot account federation by using the {productname} v2 UI. This procedure uses Red Hat Single Sign-On, which is based on the Keycloak project. These steps, and the information used to set up a robot account federation, will vary depending on your OIDC provider. + +.Prerequisites + +* You have created an organization. The following example uses `fed_test`. +* You have created a robot account. The following example uses `fest_test+robot1`. +* You have configured a OIDC for your {productname} deployment. The following example uses Red Hat Single Sign-On. + +.Procedure + +. On the Red Hat Single Sign-On main page: + +.. Select the appropriate realm that is authenticated for use with {productname}. Store the issuer URL, for example, `\https://keycloak-auth-realm.quayadmin.org/realms/quayrealm`. + +.. Click *Users* -> the name of the user to be linked with the robot account for authentication. You must use the same user account that you used when generating the OAuth2 access token. + +.. On the *Details* page, store the *ID* of the user, for example, `449e14f8-9eb5-4d59-a63e-b7a77c75f770`. 
++ +[NOTE] +==== +The information collected in this step will vary depending on your OIDC provider. For example, with Red Hat Single Sign-On, the *ID* of a user is used as the *Subject* to set up the robot account federation in a subsequent step. For a different OIDC provider, like Microsoft Entra ID, this information is stored as the *Subject*. +==== + +. On your {productname} registry: + +.. Navigate to *Organizations* and click the name of your organization, for example, *fed_test*. + +.. Click *Robot Accounts*. + +.. Click the menu kebab -> *Set robot federation*. + +.. Click the *+* symbol. + +.. In the popup window, include the following information: ++ +* *Issuer URL*: `\https://keycloak-auth-realm.quayadmin.org/realms/quayrealm`. For Red Hat Single Sign-On, this is the the URL of your Red Hat Single Sign-On realm. This might vary depending on your OIDC provider. +* *Subject*: `449e14f8-9eb5-4d59-a63e-b7a77c75f770`. For Red Hat Single Sign-On, the *Subject* is the *ID* of your Red Hat Single Sign-On user. This varies depending on your OIDC provider. For example, if you are using Microsoft Entra ID, the *Subject* will be the *Subject* or your Entra ID user. + +.. Click *Save*. + +[id="exchanging-oauth2-robot-account-token"] +== Exchanging an OAuth2 access token for a {productname} robot account token + +The following procedure leverages the `access token` generated in the previous procedure to create a new {productname} robot account token. The new {productname} robot account token is used for authentication between your OIDC provider and {productname}. + +[NOTE] +==== +The following example uses a Python script to exchange the OAuth2 access token for a {productname} robot account token. +==== + +.Prerequisites + +* You have the `python3` CLI tool installed. + +.Procedure + +. 
Save the following Python script in a `.py` file, for example, `robot_fed_token_auth.py` ++ +[source,python] +---- +import requests +import os + +TOKEN=os.environ.get('TOKEN') +robot_user = "fed-test+robot1" + +def get_quay_robot_token(fed_token): + URL = "https:///oauth2/federation/robot/token" + response = requests.get(URL, auth=(robot_user,fed_token)) <1> + print(response) + print(response.text) + +if __name__ == "__main__": + get_quay_robot_token(TOKEN) +---- +<1> If your {productname} deployment is using custom SSL/TLS certificates, the response must be `response = requests.get(URL,auth=(robot_user,fed_token),verify=False)`, which includes the `verify=False` flag. + +. Export the OAuth2 access token as `TOKEN`. For example: ++ +[source,terminal] +---- +$ export TOKEN = eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJTVmExVHZ6eDd2cHVmc1dkZmc1SHdua1ZDcVlOM01DN1N5T016R0QwVGhVIn0... +---- + +. Run the `robot_fed_token_auth.py` script by entering the following command: ++ +[source,terminal] +---- +$ python3 robot_fed_token_auth.py +---- ++ +.Example output ++ +[source,terminal] +---- + +{"token": "291cmNlX2FjY2VzcyI6eyJhY2NvdW50Ijp7InJvbGVzIjpbIm1hbmFnZS1hY2NvdW50IiwibWFuYWdlLWFjY291bnQtbGlua3MiLCJ2aWV3LXByb2ZpbGUiXX19LCJzY29wZSI6InByb2ZpbGUgZW1haWwiLCJlbWFpbF92ZXJpZ..."} +---- ++ +[IMPORTANT] +==== +This token expires after one hour. After one hour, a new token must be generated. +==== + +. Export the robot account access token as `QUAY_TOKEN`. For example: ++ +[source,terminal] +---- +$ export QUAY_TOKEN=291cmNlX2FjY2VzcyI6eyJhY2NvdW50Ijp7InJvbGVzIjpbIm1hbmFnZS1hY2NvdW50IiwibWFuYWdlLWFjY291bnQtbGlua3MiLCJ2aWV3LXByb2ZpbGUiXX19LCJzY29wZSI6InByb2ZpbGUgZW1haWwiLCJlbWFpbF92ZXJpZ +---- + +[id="pushing-pulling-images-robot-account"] +== Pushing and pulling images + +After you have generated a new robot account access token and exported it, you can log in and the robot account using the access token and push and pull images. 
+ +.Prerequisites + +* You have exported the OAuth2 access token into a new robot account access token. + +.Procedure + +. Log in to your {productname} registry using the `fest_test+robot1` robot account and the `QUAY_TOKEN` access token. For example: ++ +[source,terminal] +---- +$ podman login -u fed_test+robot1 -p $QUAY_TOKEN +---- + +. Pull an image from a {productname} repository for which the robot account has the proper permissions. For example: ++ +[source,terminal] +---- +$ podman pull /> +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 900e6061671b done +Copying config 8135583d97 done +Writing manifest to image destination +Storing signatures +8135583d97feb82398909c9c97607159e6db2c4ca2c885c0b8f590ee0f9fe90d +0.57user 0.11system 0:00.99elapsed 68%CPU (0avgtext+0avgdata 78716maxresident)k +800inputs+15424outputs (18major+6528minor)pagefaults 0swaps +---- + +. Attempt to pull an image from a {productname} repository for which the robot account does _not_ have the proper permissions. For example: ++ +[source,terminal] +---- +$ podman pull /> +---- ++ +.Example output ++ +[source,terminal] +---- +Error: initializing source docker://quay-server.example.com/example_repository/busybox:latest: reading manifest in quay-server.example.com/example_repository/busybox: unauthorized: access to the requested resource is not authorized +---- ++ +After one hour, the credentials for this robot account are set to expire. Afterwards, you must generate a new access token for this robot account. 
diff --git a/modules/ldap-filtering-intro.adoc b/modules/ldap-filtering-intro.adoc index f2f8f0c74..32844568d 100644 --- a/modules/ldap-filtering-intro.adoc +++ b/modules/ldap-filtering-intro.adoc @@ -1,5 +1,5 @@ [[ldap-filtering]] = LDAP filtering -Lightweight Directory Access Protocol (LDAP) is an open, vendor neutral, industry standard application protocol for accessing and maintaining distributed directory information services over an IP network. {productname} supports using LDAP as an identity provider. {productname} users can now apply additional filters for lookup queries if LDAP / AD authentication is used. For information on setting up LDAP authentication for {productname}, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/ldap-authentication-setup-for-quay-enterprise[LDAP authentication setup for {productname}]. +Lightweight Directory Access Protocol (LDAP) is an open, vendor neutral, industry standard application protocol for accessing and maintaining distributed directory information services over an IP network. {productname} supports using LDAP as an identity provider. {productname} users can now apply additional filters for lookup queries if LDAP / AD authentication is used. For information on setting up LDAP authentication for {productname}, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/ldap-authentication-setup-for-quay-enterprise[LDAP authentication setup for {productname}]. diff --git a/modules/ldap-timeouts-quay.adoc b/modules/ldap-timeouts-quay.adoc new file mode 100644 index 000000000..086c1b884 --- /dev/null +++ b/modules/ldap-timeouts-quay.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="ldap-timeouts-quay"] += Can I increase LDAP timeouts when accessing {productname}? + +When using LDAP as your authentication provider, some users might experience timeouts when accessing {productname}. 
You can increase the timeout value by adding the following properties to your `config.yaml` file: + +[source,yaml] +---- +LDAP_TIMEOUT: 60 +LDAP_NETWORK_TIMEOUT: 60 +---- + +This increases the timeout to 60 seconds. The default time for this field is 10 seconds. + +If you are using a standalone version of {productname}, redeploy {productname} after updating your `config.yaml` file. + +If you are using the {productname} Operator, update the `config-bundle-secret` with the latest configuration. \ No newline at end of file diff --git a/modules/limit-organization-creation.adoc b/modules/limit-organization-creation.adoc new file mode 100644 index 000000000..eafccc4ac --- /dev/null +++ b/modules/limit-organization-creation.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="limit-organization-creation"] += Can I limit normal users from creating organizations in {productname}? + +Currently, there is no way to limit normal users from creating organizations in {productname}. \ No newline at end of file diff --git a/modules/listing-repos-superuser-api.adoc b/modules/listing-repos-superuser-api.adoc new file mode 100644 index 000000000..e9be21e0e --- /dev/null +++ b/modules/listing-repos-superuser-api.adoc @@ -0,0 +1,54 @@ +[id="listing-logs-superuser-api"] += Listing logs as a superuser with the {productname} API + +{productname} superusers can list usage logs for the current system. 
+ +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listalllogs[`GET /api/v1/superuser/logs`] endpoint to list the usage logs for the current system: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/logs?starttime=&endtime=&page=&next_page=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"start_time": "Mon, 17 Feb 2025 19:29:14 -0000", "end_time": "Wed, 19 Feb 2025 19:29:14 -0000", "logs": [{"kind": "login_success", "metadata": {"type": "quayauth", "useragent": "Mozilla/5.0 (X11; Linux x86_64; rv:134.0) Gecko/20100101 Firefox/134.0"}, "ip": "192.168.1.131", "datetime": "Tue, 18 Feb 2025 19:28:15 -0000", "namespace": {"kind": "user", "name": "quayadmin", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}}}], "next_page": "gAAAAABntN-KbPJDI0PpcHmWjRCmQTLiCprE_KXiOSidbGZ7Ireu8pVTgGUIstijNhmiLzlAv_S3HOsCrKWnuBmoQYZ3F53Uxg=="} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getregistrysize[`GET /api/v1/superuser/registrysize/`] end point to obtain information about the size of the registry: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/registrysize/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"size_bytes": 0, "last_ran": null, "running": false, "queued": false} +---- +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getregistrysize[`POST /api/v1/superuser/registrysize/`] end point to define registry size information: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/registrysize/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" 
\ + -d '{ + "namespace": "", + "last_ran": 1700000000, + "queued": true, + "running": false + }' +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/logging-into-quayio.adoc b/modules/logging-into-quayio.adoc new file mode 100644 index 000000000..725df6bbc --- /dev/null +++ b/modules/logging-into-quayio.adoc @@ -0,0 +1,62 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="logging-into-quay"] += Logging into Quay + +A user account for {quayio} represents an individual with authenticated access to the platform's features and functionalities. Through this account, you gain the capability to create and manage repositories, upload and retrieve container images, and control access permissions for these resources. This account is pivotal for organizing and overseeing your container image management within {quayio}. + +[NOTE] +==== +Not all features on {quayio} require that users be logged in. For example, you can anonymously pull an image from {quayio} without being logged in, so long as the image you are pulling comes from a public repository. +==== + +Users have two options for logging into {quayio}: + +* By logging in through {quayio}. ++ +This option provides users with the legacy UI, as well as an option to use the beta UI environment, which adheres to link:https://www.patternfly.org/[PatternFly] UI principles. ++ +* By logging in through the link:console.redhat.com/quay[Red Hat Hybrid Cloud Console]. ++ +This option uses Red Hat SSO for authentication, and is a public managed service offering by Red Hat. This option _always_ requires users to login. Like other managed services, Quay on the Red Hat Hybrid Cloud Console enhances the user experience by adhering to link:https://www.patternfly.org/[PatternFly] UI principles. 
+ +Differences between using {quayio} directly and Quay on the link:console.redhat.com/quay[Red Hat Hybrid Cloud Console] are negligible, including for users on the free tier. Whether you are using {quayio} directly, on the Hybrid Cloud Console, features that require login, such as pushing to a repository, use your {quayio} username specifications. + +[id="logging-into-quayio"] +== Logging into {quayio} + +Use the following procedure to log into {quayio}. + +.Prerequisites + +* You have created a Red Hat account and a {quayio} account. For more information, see "Creating a {quayio} account". + +.Procedure + +. Navigate to link:quay.io[{quayio}]. + +. In the navigation pane, select *Sign In* and log in using your Red Hat credentials. + +. If it is your first time logging in, you must confirm the automatically-generated username. Click *Confirm Username* to log in. ++ +You are redirected to the {quayio} repository landing page. ++ +image:quayio-repo-landing-page.png[{quayio} repository landing page] + +[id="logging-into-quay-hybrid-cloud-console"] +== Logging into Quay through the Hybrid Cloud Console + +.Prerequisites + +* You have created a Red Hat account and a {quayio} account. For more information, see "Creating a {quayio} account". + +.Procedure + +. Navigate to the link:console.redhat.com/quay[Quay on the Red Hat Hybrid Cloud Console] and log in using your Red Hat account. 
You are redirected to the Quay repository landing page: ++ +image:quay-hybrid-cloud-landing-page.png[Quay on the Red Hat Hybrid Cloud Console] diff --git a/modules/managing-a-team-api.adoc b/modules/managing-a-team-api.adoc new file mode 100644 index 000000000..a1c5634e0 --- /dev/null +++ b/modules/managing-a-team-api.adoc @@ -0,0 +1,12 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-a-team-api"] += Managing a team by using the {productname} API + +After you have created a team, you can use the API to obtain information about team permissions or team members, add, update, or delete team members (including by email), or delete an organization team. + +The following procedures show you how to manage a team using the {productname} API. \ No newline at end of file diff --git a/modules/managing-builds-api.adoc b/modules/managing-builds-api.adoc new file mode 100644 index 000000000..0a7b1d6e5 --- /dev/null +++ b/modules/managing-builds-api.adoc @@ -0,0 +1,84 @@ +[id="manage-builds-api"] += Managing builds by using the {productname} API + +Builds can be managed by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listbuildtriggers[`GET /api/v1/repository/{repository}/trigger/`] endpoint to list the triggers for the specified repository: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"triggers": [{"id": "32ca5eae-a29f-46c7-8f44-3221ca417c92", "service": "custom-git", "is_active": false, "build_source": null, "repository_url": null, "config": {}, "can_invoke": true, "enabled": true, "disabled_reason": null}]} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#activatebuildtrigger[`POST /api/v1/repository/{repository}/trigger/{trigger_uuid}/activate`] endpoint to activate the specified build trigger. ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/activate" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "config": { + "branch": "main" + }, + "pull_robot": "example+robot" + }' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#manuallystartbuildtrigger[`POST /api/v1/repository/{repository}/trigger/{trigger_uuid}/start`] endpoint to manually start the build from the specified trigger: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/start" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "branch_name": "main", + "commit_sha": "abcdef1234567890", + "refs": "refs/heads/main" + }' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listtriggerrecentbuilds[`GET /api/v1/repository/{repository}/trigger/{trigger_uuid}/builds`] endpoint to list the builds started by the specified trigger: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/builds?limit=10" \ + -H "Authorization: Bearer " +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getbuildtrigger[`GET /api/v1/repository/{repository}/trigger/{trigger_uuid}`] endpoint to get information for the specified build trigger: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updatebuildtrigger[`PUT /api/v1/repository/{repository}/trigger/{trigger_uuid}`] endpoint to update the specified build trigger: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"enabled": true}' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletebuildtrigger[`DELETE /api/v1/repository/{repository}/trigger/{trigger_uuid}`] endpoint to delete the specified build trigger: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/managing-namespace-auto-pruning-policies.adoc b/modules/managing-namespace-auto-pruning-policies.adoc new file mode 100644 index 000000000..37454fa99 --- /dev/null +++ b/modules/managing-namespace-auto-pruning-policies.adoc @@ -0,0 +1,583 @@ +:_content-type: PROCEDURE +[id="managing-namespace-auto-pruning-policies"] += Managing auto-pruning policies using the {productname} UI + +//All API content in this module needs removed and replaced with the modules that exist in the /api/ guide. 
+ +All auto-pruning policies, with the exception of a registry-wide auto pruning policy, are created using the {productname} v2 UI or by using the API. This can be done after you have configured your {productname} `config.yaml` file to enable the auto-pruning feature and the v2 UI. + +[NOTE] +==== +This feature is not available when using the {productname} legacy UI. +==== + +[id="configuring-namespace-auto-prune-feature"] +== Configuring the {productname} auto-pruning feature + +Use the following procedure to configure your {productname} `config.yaml` file to enable the auto-pruning feature. + +.Prerequisites + +* You have set `FEATURE_UI_V2` to `true` in your `config.yaml` file. + +.Procedure + +* In your {productname} `config.yaml` file, add, and set, the `FEATURE_AUTO_PRUNE` environment variable to `True`. For example: ++ +[source,yaml] +---- +# ... +FEATURE_AUTO_PRUNE: true +# ... +---- + +[id="creating-registry-wide-auto-pruning-policy"] +== Creating a registry-wide auto-pruning policy + +Registry-wide auto-pruning policies can be configured on new and existing organizations. This feature saves {productname} administrators time, effort, and storage by enforcing registry-wide rules. + +{productname} administrators must enable this feature by updating their `config.yaml` file through the inclusion of `DEFAULT_NAMESPACE_AUTOPRUNE_POLICY` configuration field, and one of `number_of_tags` or `creation_date` methods. Currently, this feature cannot be enabled by using the v2 UI or the API. + +Use the following procedure to create an auto-prune policy for your {productname} registry. + +.Prerequisites + +* You have enabled the `FEATURE_AUTO_PRUNE` feature. + +.Procedure + +. Update your `config.yaml` file to add the `DEFAULT_NAMESPACE_AUTOPRUNE_POLICY` configuration field: + +.. To set the policy method to remove the oldest tags by their creation date until the number of tags provided is left, use the `number_of_tags` method: ++ +[source,yaml] +---- +# ... 
+DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: number_of_tags + value: 2 <1> +# ... +---- +<1> In this scenario, two tags remain. + +.. To set the policy method to remove tags with a creation date older than the provided time span, for example, `5d`, use the `creation_date` method: ++ +[source,yaml] +---- +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: creation_date + value: 5d +---- + +. Restart your {productname} deployment. + +. Optional. If you need to tag and push images to test this feature: + +.. Tag four sample images that will be pushed to a {productname} registry. For example: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test2 +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test3 +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test4 +---- + +.. Push the four sample images to the registry with auto-pruning enabled by entering the following commands: ++ +[source,terminal] +---- +$ podman push /quayadmin/busybox:test +---- ++ +[source,terminal] +---- +$ podman push //busybox:test2 +---- ++ +[source,terminal] +---- +$ podman push //busybox:test3 +---- ++ +[source,terminal] +---- +$ podman push //busybox:test4 +---- + +. Check that there are four tags in the registry that you pushed the images to. + +. By default, the auto-pruner worker at the registry level runs every 24 hours. After 24 hours, the two oldest image tags are removed, leaving the `test3` and `test4` tags if you followed these instructions. Check your {productname} organization to ensure that the two oldest tags were removed. + +[id="creating-policy-v2-ui"] +== Creating an auto-prune policy for an organization by using the {productname} v2 UI + +Use the following procedure to create an auto-prune policy for an organization using the {productname} v2 UI. 
+ +.Prerequisites + +* You have enabled the `FEATURE_AUTO_PRUNE` feature. +* Your organization has image tags that have been pushed to it. + +.Procedure + +. On the {productname} v2 UI, click *Organizations* in the navigation pane. + +. Select the name of an organization that you will apply the auto-pruning feature to, for example, `test_organization`. + +. Click *Settings*. + +. Click *Auto-Prune Policies*. For example: ++ +image:auto-prune-policies-page.png[Auto-Prune Policies page] + +. Click the drop down menu and select the desired policy, for example, *By number of tags*. + +. Select the desired number of tags to keep. By default, this is set at *20* tags. For this example, the number of tags to keep is set at *3*. + +. Optional. With the introduction of _regular expressions_, you are provided the following options to fine-grain your auto-pruning policy: ++ +* *Match*: When selecting this option, the auto-pruner prunes all tags that match the given _regex_ pattern. +* *Does not match*: When selecting this option, the auto-pruner prunes all tags that _do not_ match the _regex_ pattern. ++ +If you do not select an option, the auto-pruner defaults to pruning all image tags. ++ +For this example, click the *Tag pattern* box and select *match*. In the regex box, enter a pattern to match tags against. For example, to automatically prune all `test` tags, enter `^test.*`. + +. Optional. You can create a second auto-prune policy by clicking *Add Policy* and entering the required information. + +. Click *Save*. A notification that your auto-prune policy has been updated appears. ++ +With this example, the organization is configured to keep the three latest tags that are named `^test.*`. + +.Verification + +* Navigate to the *Tags* page of your Organization's repository. After a few minutes, the auto-pruner worker removes tags that no longer fit within the established criteria. 
In this example, it removes the `busybox:test` tag, and keeps the `busybox:test2`, `busybox:test3`, and `busybox:test4` tags. ++ +After tags are automatically pruned, they go into the {productname} time machine, or the amount of time after a tag is deleted that the tag is accessible before being garbage collected. The expiration time of an image tag is dependent on your organization's settings. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#garbage-collection[{productname} garbage collection]. + +[id="creating-policy-api"] +== Creating an auto-prune policy for a namespace by using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for a namespace. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationautoprunepolicy[`POST /api/v1/organization/{orgname}/autoprunepolicy/`] command to create a new policy that limits the number of tags allowed in an organization: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +Alternatively, you can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ +"method": "creation_date", "value": "7d"}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output +[source,terminal] +---- +{"uuid": "73d64f05-d587-42d9-af6d-e726a4a80d6e"} +---- + +. Optional. 
You can add an additional policy to an organization and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/organization//autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `true` makes it so that tags that match the given regex pattern will be pruned. In this example, tags that match `^v*` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0"} +---- + +. You can update your organization's auto-prune policy by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationautoprunepolicy[`PUT /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' "/api/v1/organization//autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step. + +. 
Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0", "method": "creation_date", "value": "4d", "tagPattern": "^v*", "tagPatternMatches": true}, {"uuid": "da4d0ad7-3c2d-4be8-af63-9c51f9a501bc", "method": "number_of_tags", "value": 10, "tagPattern": null, "tagPatternMatches": true}, {"uuid": "17b9fd96-1537-4462-a830-7f53b43f94c2", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true}]} +---- + +. You can delete the auto-prune policy for your organization by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/73d64f05-d587-42d9-af6d-e726a4a80d6e +---- + +[id="creating-policy-api-current-user"] +== Creating an auto-prune policy for a namespace for the current user by using the API + +You can use {productname} API endpoints to manage auto-pruning policies for your account. + +[NOTE] +==== +The use of `/user/` in the following commands represents the user that is currently logged into {productname}. +==== + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following `POST` command create a new policy that limits the number of tags for the current user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/user/autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- + +. 
Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- + +[id="creating-policy-repository-v2-ui"] +== Creating an auto-prune policy for a repository using the {productname} v2 UI + +Use the following procedure to create an auto-prune policy for a repository using the {productname} v2 UI. + +.Prerequisites + +* You have enabled the `FEATURE_AUTO_PRUNE` feature. +* You have pushed image tags to your repository. + +.Procedure + +. On the {productname} v2 UI, click *Repository* in the navigation pane. + +. Select the name of an organization that you will apply the auto-pruning feature to, for example, `/`. + +. Click *Settings*. + +. Click *Repository Auto-Prune Policies*. + +. Click the drop down menu and select the desired policy, for example, *By age of tags*. + +. Set a time, for example, `5` and an interval, for example `minutes` to delete tags older than the specified time frame. For this example, tags older than 5 minutes are marked for deletion. + +. Optional. 
With the introduction of _regular expressions_, you are provided the following options to fine-grain your auto-pruning policy: ++ +* *Match*: When selecting this option, the auto-pruner prunes all tags that match the given _regex_ pattern. +* *Does not match*: When selecting this option, the auto-pruner prunes all tags that _do not_ match the _regex_ pattern. ++ +If you do not select an option, the auto-pruner defaults to pruning all image tags. ++ +For this example, click the *Tag pattern* box and select *Does not match*. In the _regex_ box, enter a pattern to match tags against. For example, to automatically prune all tags that _do not_ match the `test` tag, enter `^test.*`. + +. Optional. You can create a second auto-prune policy by clicking *Add Policy* and entering the required information. + +. Click *Save*. A notification that your auto-prune policy has been updated appears. + +.Verification + +* Navigate to the *Tags* page of your Organization's repository. With this example, Tags that are older than 5 minutes that _do not_ match the `^test.*` _regex_ tag are automatically pruned when the pruner runs. ++ +After tags are automatically pruned, they go into the {productname} time machine, or the amount of time after a tag is deleted that the tag is accessible before being garbage collected. The expiration time of an image tag is dependent on your organization's settings. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#garbage-collection[{productname} garbage collection]. + +[id="creating-repository-policy-api"] +== Creating an auto-prune policy for a repository using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for an repository. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. 
+ +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepositoryautoprunepolicy[`POST /api/v1/repository/{repository}/autoprunepolicy/`] command to create a new policy that limits the number of tags allowed in a repository: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "creation_date", "value": "7d"}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- + +. Optional. You can add an additional policy and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "", + "value": "<7d>", + "tagPattern": "<^test.>*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/repository///autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `false` makes it so that all tags that _do not_ match the given regex pattern are pruned. In this example, all tags _but_ `^test.` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b53d8d3f-2e73-40e7-96ff-736d372cd5ef"} +---- + +. 
You can update your policy for the repository by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepositoryautoprunepolicy[`PUT /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid}`] command and passing in the UUID. For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "5", + "tagPattern": "^test.*", + "tagPatternMatches": true + }' \ + "https://quay-server.example.com/api/v1/repository///autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step to check your auto-prune policy. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- + +[id="creating-policy-api-other-user"] +== Creating an auto-prune policy on a repository for a user with the API + +You can use {productname} API endpoints to manage auto-pruning policies on a repository for user accounts that are not your own, so long as you have `admin` privileges on the repository. 
+ +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. +* You have `admin` privileges on the repository that you are creating the policy for. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserautoprunepolicy[`POST /api/v1/repository///autoprunepolicy/`] command create a new policy that limits the number of tags for the user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' https:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- + +. Optional. You can add an additional policy for the current user and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' \ + "http:///api/v1/repository///autoprunepolicy/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b3797bcd-de72-4b71-9b1e-726dabc971be"} +---- + +. You can update your policy for the current user by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateuserautoprunepolicy[`PUT /api/v1/repository///autoprunepolicy/`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^test.", + "tagPatternMatches": true + }' "https:///api/v1/repository///autoprunepolicy/" +---- ++ +Updating a policy does not return output in the CLI. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/7726f79c-cbc7-490e-98dd-becdc6fefce7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "81ee77ec-496a-4a0a-9241-eca49437d15b", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- diff --git a/modules/managing-organization-quota-superuser-api.adoc b/modules/managing-organization-quota-superuser-api.adoc new file mode 100644 index 000000000..e799e28d0 --- /dev/null +++ b/modules/managing-organization-quota-superuser-api.adoc @@ -0,0 +1,69 @@ +[id="managing-organization-quota-superuser-api"] += Managing organization quota with the {productname} API + +Quota can be managed with the {productname} API with superuser admin privileges. These endpoints allow superusers to manage quota policies for all organizations within the registry. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserquotasuperuser[`POST /api/v1/superuser/organization/{namespace}/quota`] API endpoint to create a quota policy for an organization: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240 + }' +---- ++ +.Example output ++ +[source,terminal] +---- +"Created" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserquotasuperuser[`GET /api/v1/superuser/organization/{namespace}/quota`] API endpoint to obtain information about the policy, including the quota ID: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 2, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}], "default_config_exists": false}] +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeuserquotasuperuser[`PUT /api/v1/superuser/organization/{namespace}/quota/{quota_id}`] API endpoint to change the quota policy: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 2, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}], "default_config_exists": false} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserquotasuperuser[`DELETE /api/v1/superuser/organization/{namespace}/quota/{quota_id}`] API endpoint to delete a quota policy for an organization: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/managing-organization-superuser-api.adoc b/modules/managing-organization-superuser-api.adoc new file mode 100644 index 000000000..b10a36cd3 --- /dev/null +++ b/modules/managing-organization-superuser-api.adoc @@ -0,0 +1,56 @@ +[id="organization-manage-api"] += Managing organizations as a superuser with the {productname} API + +Superusers have the ability to list, change, and delete organizations by using the {productname} API. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listallorganizations[`GET /api/v1/superuser/organizations`] endpoint to list all organizations: ++ +[source,terminal] +---- +$ curl -L -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/organizations?name=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"organizations": [{"name": "fed_test", "email": "fe11fc59-bd09-459a-a21c-b57692d151c9", "avatar": {"name": "fed_test", "hash": "e2ce1fb42ec2e0602362beb64b5ebd1e6ad291b710a0355f9296c16157bef3cb", "color": "#ff7f0e", "kind": "org"}, "quotas": [{"id": 3, "limit_bytes": 10737418240, "limits": []}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}}, {"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "org"}, "quotas": [{"id": 2,
"limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganization[`PUT /api/v1/superuser/organizations/{name}`] endpoint to change or update information for an organization: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "email": "", + "invoice_email": , + "invoice_email_address": "", + "tag_expiration_s": + }' \ + "https:///api/v1/superuser/organizations/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "org"}, "quotas": [{"id": 2, "limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganization[`DELETE /api/v1/superuser/organizations/{name}`] endpoint to delete an organization: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/organizations/" +---- ++ +This command does not return output in the CLI.
\ No newline at end of file diff --git a/modules/managing-restricted-users.adoc b/modules/managing-restricted-users.adoc new file mode 100644 index 000000000..bba1a0d04 --- /dev/null +++ b/modules/managing-restricted-users.adoc @@ -0,0 +1,41 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: REFERENCE +[id="managing-restricted-users"] += Managing restricted users + +By default, all {productname} members part of a registry can create repositories and upload content to their own user account. For example, when `user1` pushes an artifact tag such as `//:`, a repository of the name `user1/image` is created. Inside of that repository is information about the artifact tag. + +With the `FEATURE_RESTRICTED_USERS` configuration field, {productname} administrators can restrict all users that are part of their registry from pushing images or artifacts to the registry. This configuration field effectively prevents all users from creating new organizations or pushing content altogether _unless they are already part of that organization and defined as a team member of that organization_; that is, restricted users still have normal permissions in organizations based on the teams that they are members of. + +For example, a {productname} administrator sets the `FEATURE_RESTRICTED_USERS` configuration field in their `config.yaml` file as follows: + +[source,yaml] +---- +FEATURE_RESTRICTED_USERS: true +---- + +When set as shown, `user1` is unable to create a new organization by using the {productname} UI. Upon attempt, the following error message is returned: `Unauthorized`. Additionally, if `user1` attempts to push an image to their own namespace by using the CLI (that is, `//:`), the following error message is returned: `Error: writing blob: initiating layer upload to /v2/user1//blobs/uploads/ in : unauthorized: access to the requested resource is not authorized`.
However, if `user1` is part of an organization's team as defined by an administrator, they retain the permissions granted to that team. For example, if `user1` is added to an organization's team and given the *Admin* role, they have administrative privileges for that organization. + +When `FEATURE_RESTRICTED_USERS` is leveraged with the `RESTRICTED_USERS_WHITELIST` configuration field, however, {productname} administrators can allow specified members the ability to continue to push to the registry or make organizations. In general, when `FEATURE_RESTRICTED_USERS` is set, {productname} administrators might also set `RESTRICTED_USERS_WHITELIST`, otherwise all members of the registry (with the exception of those defined by a team) are rendered incapable of doing basic tasks. + +For example, a {productname} administrator sets the `FEATURE_RESTRICTED_USERS` and `RESTRICTED_USERS_WHITELIST` configuration fields in their `config.yaml` file as follows: + +[source,yaml] +---- +# ... +FEATURE_RESTRICTED_USERS: true +RESTRICTED_USERS_WHITELIST: + - user2 +# ... +---- + +With this configuration, all users _except_ `user2` are restricted from pushing images or creating organizations. Other users part of a team will also have these privileges. Users part of the registry that are neither defined by the `RESTRICTED_USERS_WHITELIST` field nor part of an organization's team have no permissions within the registry, and will therefore be unable to perform basic tasks. + +[NOTE] +==== +This feature works differently for LDAP deployment types. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3/html-single/manage_red_hat_quay/index#ldap-authentication-setup-for-quay-enterprise[LDAP authentication setup for {productname}].
+==== \ No newline at end of file diff --git a/modules/managing-robot-account-permissions-v2-ui.adoc b/modules/managing-robot-account-permissions-v2-ui.adoc new file mode 100644 index 000000000..24a2d8a8d --- /dev/null +++ b/modules/managing-robot-account-permissions-v2-ui.adoc @@ -0,0 +1,36 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-robot-account-permissions-v2-ui"] += Bulk managing robot account repository access + +Use the following procedure to manage, in bulk, robot account repository access by using the {productname} v2 UI. + +.Prerequisites + +* You have created a robot account. +* You have created multiple repositories under a single organization. + +.Procedure + +. On the {productname} v2 UI landing page, click *Organizations* in the navigation pane. + +. On the *Organizations* page, select the name of the organization that has multiple repositories. The number of repositories under a single organization can be found under the *Repo Count* column. + +. On your organization's page, click *Robot accounts*. + +. For the robot account that will be added to multiple repositories, click the kebab icon -> *Set repository permissions*. + +. On the *Set repository permissions* page, check the boxes of the repositories that the robot account will be added to. For example: ++ +image:set-repository-permissions-robot-account.png[Set repository permissions] + +. Set the permissions for the robot account, for example, *None*, *Read*, *Write*, *Admin*. + +. Click *save*. An alert that says *Success alert: Successfully updated repository permission* appears on the *Set repository permissions* page, confirming the changes. + +. Return to the *Organizations* -> *Robot accounts* page. Now, the *Repositories* column of your robot account shows the number of repositories that the robot account has been added to. 
\ No newline at end of file diff --git a/modules/managing-service-keys-api.adoc b/modules/managing-service-keys-api.adoc new file mode 100644 index 000000000..b246f8ccf --- /dev/null +++ b/modules/managing-service-keys-api.adoc @@ -0,0 +1,107 @@ +[id="service-key-manage-api"] += Managing service keys as a superuser with the {productname} API + +Superusers have the ability to create, list, change, and delete service keys by using the {productname} API. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createservicekey[`POST /api/v1/superuser/keys`] endpoint to create a service key: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "service": "", + "expiration": + }' \ + "/api/v1/superuser/keys" +---- ++ +.Example output ++ +[source,terminal] +---- +{"message":""} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#approveservicekey[`POST /api/v1/superuser/approvedkeys/{kid}`] endpoint to approve a service key: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "notes": "" + }' \ + "https:///api/v1/superuser/approvedkeys/" +---- ++ +This command does not return output in the CLI.
+ +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listservicekeys[`GET /api/v1/superuser/keys`] endpoint to list service keys: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys" +---- ++ +.Example output ++ +[source,terminal] +---- +{"keys":[{"approval":{"approval_type":"ServiceKeyApprovalType.AUTOMATIC","approved_date":"Mon, 20 Jan 2025 14:46:01 GMT","approver":null,"notes":""},"created_date":"Mon, 20 Jan 2025 14:46:01 GMT","expiration_date":"Wed, 05 Feb 2025 22:03:37 GMT","jwk":{"e":"AQAB","kid":"","kty":"RSA","n":""},"kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","metadata":{"created_by":"CLI tool"},"name":"http://quay-server.example.com:80","rotation_duration":null,"service":"quay"}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getservicekey[`GET /api/v1/superuser/keys/{kid}`] endpoint to retrieve information about a service account by its kid: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"approval":{"approval_type":"ServiceKeyApprovalType.AUTOMATIC","approved_date":"Mon, 20 Jan 2025 14:46:01 GMT","approver":null,"notes":""},"created_date":"Mon, 20 Jan 2025 14:46:01 GMT","expiration_date":"Wed, 05 Feb 2025 22:03:37 
GMT","jwk":{"e":"AQAB","kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","kty":"RSA","n":"5iMX7RQ_4F_zdb1qonMsuWUDauCOqEyRpD8L_EhgnwDxrgMHuOlJ4_7sEOrOa3Jkx3QhwIW6LJCP69PR5X0wvz6vmC1DoWEaWv41bAq23Knzj7gUU9-N_fkZPZN9NQwZ-D-Zqg9L1c_cJF93Dy93py8_JswWFDj1FxMaThJmrX68wBwjhF-JLYqgCAGFyezzJ3oTpO-esV9v6R7skfkaqtx_cjLZk_0cKB4VKTtxiy2A8D_5nANTOSSbZLXNh2Vatgh3yrOmnTTNLIs0YO3vFIuylEkczHlln-40UMAzRB3HNspUySyzImO_2yGdrA762LATQrOzJN8E1YKCADx5CQ"},"kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","metadata":{"created_by":"CLI tool"},"name":"http://quay-server.example.com:80","rotation_duration":null,"service":"quay"} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateservicekey[`PUT /api/v1/superuser/keys/{kid}`] endpoint to update your service key, such as the metadata: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "metadata": {"": ""}, + "expiration": + }' \ + "https:///api/v1/superuser/keys/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"approval":{"approval_type":"ServiceKeyApprovalType.AUTOMATIC","approved_date":"Mon, 20 Jan 2025 14:46:01 GMT","approver":null,"notes":""},"created_date":"Mon, 20 Jan 2025 14:46:01 GMT","expiration_date":"Mon, 03 Mar 2025 10:40:00 GMT","jwk":{"e":"AQAB","kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","kty":"RSA","n":"5iMX7RQ_4F_zdb1qonMsuWUDauCOqEyRpD8L_EhgnwDxrgMHuOlJ4_7sEOrOa3Jkx3QhwIW6LJCP69PR5X0wvz6vmC1DoWEaWv41bAq23Knzj7gUU9-N_fkZPZN9NQwZ-D-Zqg9L1c_cJF93Dy93py8_JswWFDj1FxMaThJmrX68wBwjhF-JLYqgCAGFyezzJ3oTpO-esV9v6R7skfkaqtx_cjLZk_0cKB4VKTtxiy2A8D_5nANTOSSbZLXNh2Vatgh3yrOmnTTNLIs0YO3vFIuylEkczHlln-40UMAzRB3HNspUySyzImO_2yGdrA762LATQrOzJN8E1YKCADx5CQ"},"kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","metadata":{"created_by":"CLI tool","environment":"production"},"name":"quay-service-key-updated","rotation_duration":null,"service":"quay"} 
+---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteservicekey[`DELETE /api/v1/superuser/keys/{kid}`] endpoint to delete a service key: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/managing-superuser-full-access.adoc b/modules/managing-superuser-full-access.adoc new file mode 100644 index 000000000..f8cfe9707 --- /dev/null +++ b/modules/managing-superuser-full-access.adoc @@ -0,0 +1,28 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: REFERENCE +[id="managing-superuser-access"] += Managing superuser access to organizations + +When a user, for example, `user1` creates an organization within a registry, they own the access and permissions to that organization. As such, they can create repositories, define teams and memberships, create robot accounts, set default permissions, view logs, and adjust other settings as warranted. It is, for all intents and purposes, the user's organization. + +By default, superusers do not have access to a user's organization. However, {productname} administrators can use the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field to grant superusers the ability to read, write, and delete content from other repositories in namespaces or organizations that they do not own or have explicit permissions for. + +[NOTE] +==== +* This feature is only available on the beta of the new UI. When enabled, it shows all organizations that the superuser has access to. +* When this field is enabled, the superuser cannot view the image repository of every organization at once. This is a known limitation and will be fixed in a future version of {productname}. 
As a temporary workaround, the superuser can view image repositories by navigating to them from the *Organizations* page. +==== + +To grant superusers full access to all organizations within the registry, you can use the following YAML configuration: + +[source,yaml] +---- +# ... +FEATURE_SUPERUSERS_FULL_ACCESS: true +# ... +---- + +After setting `FEATURE_SUPERUSERS_FULL_ACCESS: true`, all organizations will be visible on the superuser's *Organization* page. \ No newline at end of file diff --git a/modules/managing-tags-api.adoc b/modules/managing-tags-api.adoc new file mode 100644 index 000000000..1d2ce57f8 --- /dev/null +++ b/modules/managing-tags-api.adoc @@ -0,0 +1,67 @@ +[id="tag-api"] += Managing tags with the {productname} API + +Tags can be changed, restored, deleted, or listed by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changetag[`PUT /api/v1/repository/{repository}/tag/{tag}`] endpoint to change which image a tag points to or create a new tag: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"manifest_digest": ""}' +---- ++ +.Example output ++ +[source,terminal] +---- +"Updated" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#restoretag[`POST /api/v1/repository/{repository}/tag/{tag}/restore`] endpoint to restore a repository tag back to a previous image in the repository: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository///tag//restore" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"manifest_digest": "sha256:"}' +---- ++ +.Example output ++ +[source,terminal] +---- +{} + +---- + +.
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] endpoint to obtain a list of repository tags: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": true, "start_ts": 1740496373, "manifest_digest": "sha256:d08334991a3dba62307016833083d6433f489ab0f7d36d0a4771a20b4569b2f6", "is_manifest_list": false, "size": 2280303, "last_modified": "Tue, 25 Feb 2025 15:12:53 -0000"}, {"name": "test", "reversion": false, "start_ts": 1740495442, "end_ts": 1740496373, "manifest_digest": "sha256:d08334991a3dba62307016833083d6433f489ab0f7d36d0a4771a20b4569b2f6", "is_manifest_list": false, "size": 2280303, "last_modified": "Tue, 25 Feb 2025 14:57:22 -0000", "expiration": "Tue, 25 Feb 2025 15:12:53 -0000"}, {"name": "test", "reversion": false, "start_ts": 1740495408, "end_ts": 1740495442, "manifest_digest": "sha256:d08334991a3dba62307016833083d6433f489ab0f7d36d0a4771a20b4569b2f6", "is_manifest_list": false, "size": 2280303, "last_modified": "Tue, 25 Feb 2025 14:56:48 -0000", "expiration": "Tue, 25 Feb 2025 14:57:22 -0000"}], "page": 1, "has_additional": false} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletefulltag[`DELETE /api/v1/repository/{repository}/tag/{tag}`] endpoint to delete a tag from a repository: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/managing-team-members-api.adoc b/modules/managing-team-members-api.adoc new file mode 100644 index 000000000..b4efb638d --- /dev/null +++ b/modules/managing-team-members-api.adoc @@ -0,0 +1,76 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-team-members-api"] += Managing team members and repository permissions by using the API + +Use the following procedures to add a member to a team (by direct invite or by email), or to remove a member from a team. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* Enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationteammember[`PUT /api/v1/organization/{orgname}/team/{teamname}/members/{membername}`] command to add or invite a member to an existing team: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "testuser", "kind": "user", "is_robot": false, "avatar": {"name": "testuser", "hash": "d51d17303dc3271ac3266fb332d7df919bab882bbfc7199d2017a4daac8979f0", "color": "#5254a3", "kind": "user"}, "invited": false} +---- + +* Enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationteammember[`DELETE /api/v1/organization/{orgname}/team/{teamname}/members/{membername}`] command to remove a member of a team: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- 
++ +This command does not return output in the CLI. To ensure that a member has been deleted, you can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationteammembers[`GET /api/v1/organization/{orgname}/team/{teamname}/members`] command and ensure that the member is not returned in the output. ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "owners", "members": [{"name": "quayadmin", "kind": "user", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}, "invited": false}, {"name": "test-org+test", "kind": "user", "is_robot": true, "avatar": {"name": "test-org+test", "hash": "aa85264436fe9839e7160bf349100a9b71403a5e9ec684d5b5e9571f6c821370", "color": "#8c564b", "kind": "robot"}, "invited": false}], "can_edit": true} +---- + +* You can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#inviteteammemberemail[`PUT /api/v1/organization/{orgname}/team/{teamname}/invite/{email}`] command to invite a user, by email address, to an existing team: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- + +* You can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteteammemberemailinvite[`DELETE /api/v1/organization/{orgname}/team/{teamname}/invite/{email}`] command to delete the invite of an email address to join a team.
For example: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- \ No newline at end of file diff --git a/modules/managing-team-members-repo-permissions-ui.adoc b/modules/managing-team-members-repo-permissions-ui.adoc new file mode 100644 index 000000000..81ac5b070 --- /dev/null +++ b/modules/managing-team-members-repo-permissions-ui.adoc @@ -0,0 +1,25 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-team-members-repo-permissions-ui"] += Managing team members and repository permissions + +Use the following procedure to manage team members and set repository permissions. + +* On the *Teams and membership* page of your organization, you can also manage team members and set repository permissions. + +** Click the kebab menu, and select one of the following options: ++ +** **Manage Team Members**. On this page, you can view all members, team members, robot accounts, or users who have been invited. You can also add a new team member by clicking *Add new member*. ++ +** **Set repository permissions**. On this page, you can set the repository permissions to one of the following: ++ +*** *None*. Team members have no permission to the repository. +*** *Read*. Team members can view and pull from the repository. +*** *Write*. Team members can read (pull) from and write (push) to the repository. +*** *Admin*. Full access to pull from, and push to, the repository, plus the ability to do administrative tasks associated with the repository. ++ +** **Delete**. This pop-up window allows you to delete the team by clicking *Delete*.
\ No newline at end of file diff --git a/modules/managing-team-ui.adoc b/modules/managing-team-ui.adoc new file mode 100644 index 000000000..f35348c67 --- /dev/null +++ b/modules/managing-team-ui.adoc @@ -0,0 +1,10 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-a-team-ui"] += Managing a team by using the UI + +After you have created a team, you can use the UI to manage team members, set repository permissions, delete the team, or view more general information about the team. \ No newline at end of file diff --git a/modules/managing-teams-api.adoc b/modules/managing-teams-api.adoc new file mode 100644 index 000000000..51616463b --- /dev/null +++ b/modules/managing-teams-api.adoc @@ -0,0 +1,10 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-teams-api"] += Managing teams by using the API + +Teams can be managed by using the {productname} API. diff --git a/modules/managing-user-options-api.adoc b/modules/managing-user-options-api.adoc new file mode 100644 index 000000000..6e813d3e2 --- /dev/null +++ b/modules/managing-user-options-api.adoc @@ -0,0 +1,81 @@ +[id="manage-user-options-api"] += Managing current user options by using the {productname} API + +Some user options, like starring a repository, or getting information about your account, are available with the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getloggedinuser[`GET /api/v1/user/`] endpoint to get user information for the authenticated user.
++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"anonymous": false, "username": "quayadmin", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "can_create_repo": true, "is_me": true, "verified": true, "email": "test@gmil.com", "logins": [], "invoice_email": false, "invoice_email_address": null, "preferred_namespace": false, "tag_expiration_s": 1209600, "prompts": [], "company": null, "family_name": null, "given_name": null, "location": null, "is_free_account": true, "has_password_set": true, "quotas": [{"id": 4, "limit_bytes": 2199023255552, "limits": [{"id": 3, "type": "Reject", "limit_percent": 100}]}], "quota_report": {"quota_bytes": 2280675, "configured_quota": 2199023255552, "running_backfill": "complete", "backfill_status": "complete"}, "organizations": [{"name": "test", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "org"}, "can_create_repo": true, "public": false, "is_org_admin": true, "preferred_namespace": false}, {"name": "sample", "avatar": {"name": "sample", "hash": "ba560c68f1d26e8c6b911ac9b5d10d513e7e43e576cc2baece1b8a46f36a29a5", "color": "#b5cf6b", "kind": "org"}, "can_create_repo": true, "public": false, "is_org_admin": true, "preferred_namespace": false}], "super_user": true} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserinformation[`GET /api/v1/users/{username}`] endpoint to get user information for the specified user. 
++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/users/example_user" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"anonymous": false, "username": "testuser", "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "super_user": false} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createstar[`POST /api/v1/user/starred`] endpoint to star a repository: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/user/starred" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "", + "repository": "" + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "test", "repository": "testrepo"} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.13/html-single/red_hat_quay_api_reference/index#liststarredrepos[`GET /api/v1/user/starred`] endpoint to list all starred repositories: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/starred?next_page=" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"repositories": [{"namespace": "test", "name": "testrepo", "description": "This repository is now under maintenance.", "is_public": true}]} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.13/html-single/red_hat_quay_api_reference/index#deletestar[`DELETE /api/v1/user/starred/{repository}`] endpoint to delete a star from a repository: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/user/starred/namespace/repository-name" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/managing-user-quota-superuser-api.adoc b/modules/managing-user-quota-superuser-api.adoc new file mode 100644 index 000000000..508df051f --- /dev/null +++ b/modules/managing-user-quota-superuser-api.adoc @@ -0,0 +1,69 @@ +[id="managing-user-quota-superuser-api"] += Managing user quota with the {productname} API + +As a superuser, you can manage user quota for specified organizations. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquotasuperuser[`POST /api/v1/superuser/users/{namespace}/quota`] endpoint to create a quota policy for specific users within an organization: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +"Created" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquotasuperuser[`GET /api/v1/superuser/users/{namespace}/quota`] endpoint to return a list of a user's allotted quota: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 6, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false}] +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquotasuperuser[`PUT /api/v1/superuser/users/{namespace}/quota/{quota_id}`] endpoint to adjust the user's policy: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 6, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationquotasuperuser[`DELETE /api/v1/superuser/users/{namespace}/quota/{quota_id}`] endpoint to delete a user's policy: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/manually-triggering-a-build-trigger.adoc b/modules/manually-triggering-a-build-trigger.adoc new file mode 100644 index 000000000..bbff1bd1a --- /dev/null +++ b/modules/manually-triggering-a-build-trigger.adoc @@ -0,0 +1,43 @@ +:_content-type: CONCEPT +[id="manually-triggering-a-build-trigger"] += Manually triggering a build + +_Builds_ can be triggered manually by using the following procedure. + +.Procedure + +. On the *Builds* page, *Start new build*. + +. When prompted, select *Invoke Build Trigger*. + +. Click *Run Trigger Now* to manually start the process. + +. Enter a commit ID from which to initiate the build, for example, `1c002dd`. ++ +After the build starts, you can see the _build ID_ on the *Repository Builds* page. + +ifeval::["{context}" == "quay-builders-image-automation"] +. 
You can check the status of your _build_ by clicking the commit in the *Build History* page, or by running the following command: ++ +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- + +. After the _build_ has completed, the `oc get pods -n virtual-builders` command returns no resources: ++ +[source,terminal] +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +No resources found in virtual-builders namespace. +---- +endif::[] \ No newline at end of file diff --git a/modules/mapping-repositories-to-cpe-information.adoc b/modules/mapping-repositories-to-cpe-information.adoc index 86695ec8d..fee95ee39 100644 --- a/modules/mapping-repositories-to-cpe-information.adoc +++ b/modules/mapping-repositories-to-cpe-information.adoc @@ -6,7 +6,7 @@ [id="mapping-repositories-to-cpe-information"] = Mapping repositories to Common Product Enumeration information -Clair's {rhel} scanner relies on a Common Product Enumeration (CPE) file to map RPM packages to the corresponding security data to produce matching results. These files are owned by product security and updated daily. +Clair's {rhel} scanner relies on a Common Product Enumeration (CPE) file to map RPM packages to the corresponding security data to produce matching results. Red{nbsp}Hat Product Security maintains and regularly updates these files. The CPE file must be present, or access to the file must be allowed, for the scanner to properly process RPM packages. If the file is not present, RPM packages installed in the container image will not be scanned. @@ -18,11 +18,12 @@ The CPE file must be present, or access to the file must be allowed, for the sca | `names2repos` | link:https://access.redhat.com/security/data/metrics/container-name-repos-map.json[Red Hat Name-to-Repos JSON]. 
|=== -In addition to uploading CVE information to the database for disconnected Clair installations, you must also make the mapping file available locally: +By default, Clair's indexer includes the `repos2cpe` and `names2repos` data files within the Clair container. This means that you can reference `/data/repository-to-cpe.json` and `/data/container-name-repos-map.json` in your `clair-config.yaml` file without the need for additional configuration. -* For standalone {productname} and Clair deployments, the mapping file must be loaded into the Clair pod. - -* For {productname} Operator deployments on {ocp} and Clair deployments, you must set the Clair component to `unamanged`. Then, Clair must be deployed manually, setting the configuration to load a local copy of the mapping file. +[IMPORTANT] +==== +Although Red{nbsp}Hat Product Security updates the `repos2cpe` and `names2repos` files regularly, the versions included in the `Clair` container are only updated with {productname} releases (for example, version 3.14.1 -> 3.14.2). This can lead to discrepancies between the latest CPE files and those bundled with Clair. +==== [id="mapping-repositories-to-cpe-configuration"] == Mapping repositories to Common Product Enumeration example configuration @@ -32,13 +33,13 @@ Use the `repo2cpe_mapping_file` and `name2repos_mapping_file` fields in your Cla [source,yaml] ---- indexer: - scanner: + scanner: repo: rhel-repository-scanner: - repo2cpe_mapping_file: /data/cpe-map.json + repo2cpe_mapping_file: /data/repository-to-cpe.json package: rhel_containerscanner: - name2repos_mapping_file: /data/repo-map.json + name2repos_mapping_file: /data/container-name-repos-map.json ---- For more information, see link:https://www.redhat.com/en/blog/how-accurately-match-oval-security-data-installed-rpms[How to accurately match OVAL security data to installed RPMs]. 
\ No newline at end of file diff --git a/modules/marathon-mesos-fail.adoc b/modules/marathon-mesos-fail.adoc new file mode 100644 index 000000000..29ec9fd37 --- /dev/null +++ b/modules/marathon-mesos-fail.adoc @@ -0,0 +1,117 @@ +:_content-type: CONCEPT +[id="marathon-mesos-fail"] += Pulling private images with Marathon or Mesos fails + +When using Marathon or Mesos, attempting to pull an image from a private repository fails with the following error: `msg="Error: Status 403 trying to pull repository repo/project: \"{\\\"error\\\": \\\"Permission Denied\\\"}\""`. + +As a workaround, you must copy the Docker configuration file's credentials onto the worker machines. For more information about configuring Mesos registry authentication, see link:https://mesosphere.github.io/marathon/docs/native-docker-private-registry.html[Using a Private Docker Registry]. + +When using Mesos app definitions, credentials must be provided as a URI that must be accessible by all nodes that might start your application. Approaches include distributing the file to the local filesystem of all nodes, for example through RSYNC/SCP, or storing it on a shared network drive, for example Amazon S3. It is worth considering the security implications of each approach. + +[id="deployment-docker-1-6-earlier"] +== For deployments using Docker 1.6 or earlier + +Use the following steps to configure Marathon or Mesos for use on private registries with Docker 1.6 or earlier. + +.Procedure + +. Download a configuration from a `Quay.io` credentials dialog, or log in to the private repository manually: ++ +[source,terminal] +---- +$ docker login quay.io +---- ++ +This creates a configuration file in `$HOME/.dockercfg`. + +. Add the `.dockercfg` to the `uris` field of your Mesos app definition. The `$HOME` environment variable must then be set to the same value as `$MESOS_SANDBOX` so that Docker can automatically pick up the configuration file. 
The following is an example app definition: ++ +[source,yaml] +---- +{ + "id": "/some/name/or/id", + "cpus": 1, + "mem": 1024, + "instances": 1, + "container": { + "type": "DOCKER", + "docker": { + "image": "some.docker.host.com/namespace/repo", + "network": "HOST" + } + }, + "uris": [ + "file:///etc/.dockercfg" + ] +} +---- + +[id="deployment-docker-1-6-later"] +== For deployments using Docker 1.6 or later + +Use the following steps to configure Marathon or Mesos for use on private registries with Docker 1.6 or later. + +. Download a configuration from a `Quay.io` credentials dialog, or log in to the private repository manually: ++ +[source,terminal] +---- +$ docker login quay.io +---- ++ +This creates a configuration file in `$HOME/.docker/config.json`. + +. `Tar` and `GZIP` the `$HOME/.docker` directory and its contents: ++ +[source,terminal] +---- +$ cd $HOME +---- ++ +[source,terminal] +---- +$ tar czf docker.tar.gz .docker +---- + +. Enter the following command to ensure that both the directory and the configuration are inside of the `tar`: ++ +[source,terminal] +---- +$ tar -tvf $HOME/docker.tar.gz +---- ++ +.Example output ++ +[source,terminal] +---- +drwx------ root/root 0 2015-07-28 02:54 .docker/ +-rw------- root/root 114 2015-07-28 01:31 .docker/config.json +---- + +. Optional. Put the `.tar` file into a directory readable by Mesos: ++ +[source,terminal] +---- +$ cp docker.tar.gz /etc/ +---- + +. 
Add the file to the `uris` field of your Mesos app definition: ++ +[source,yaml] +---- +{ + "id": "/some/name/or/id", + "cpus": 1, + "mem": 1024, + "instances": 1, + "container": { + "type": "DOCKER", + "docker": { + "image": "some.docker.host.com/namespace/repo", + "network": "HOST" + } + }, + "uris": [ + "file:///etc/docker.tar.gz" + ] +} +---- \ No newline at end of file diff --git a/modules/mirror-quay-api.adoc b/modules/mirror-quay-api.adoc new file mode 100644 index 000000000..e862092ea --- /dev/null +++ b/modules/mirror-quay-api.adoc @@ -0,0 +1,90 @@ +:_content-type: CONCEPT +[id="quay-mirror-api"] += Using the API to mirror a repository + +{productname} administrators can mirror external repositories by using the API. + +.Prerequisites + +* You have set `FEATURE_REPO_MIRROR: true` in your `config.yaml` file. + +.Procedure + +* Create a new repository mirror configuration by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepomirrorconfig[`POST /api/v1/repository/{repository}/mirror`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- + +* You can return information about the mirror configuration by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepomirrorconfig[`GET /api/v1/repository/{repository}/mirror`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"is_enabled": true, "mirror_type": "PULL", 
"external_reference": "https://quay.io/repository/argoproj/argocd", "external_registry_username": null, "external_registry_config": {}, "sync_interval": 86400, "sync_start_date": "2025-01-15T12:00:00Z", "sync_expiration_date": null, "sync_retries_remaining": 3, "sync_status": "NEVER_RUN", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": ["*.latest*"]}, "robot_username": "quayadmin+mirror_robot"} +---- + +* You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#syncnow[`POST /api/v1/repository/{repository}/mirror/sync-now`] endpoint to sync the repositories. For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-now" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. + +* Alternatively, you can cancel the sync with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#synccancel[`POST /api/v1/repository/{repository}/mirror/sync-cancel`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-cancel" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. + +* After creating a mirror configuration, you can make changes with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepomirrorconfig[`PUT /api/v1/repository/{repository}/mirror`] command. 
For example, you might choose to disable automatic synchronizations: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , <1> + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- +<1> Disables automatic synchronization. \ No newline at end of file diff --git a/modules/mirrored-images-unable-pull-rhocp.adoc b/modules/mirrored-images-unable-pull-rhocp.adoc new file mode 100644 index 000000000..a6bccacb8 --- /dev/null +++ b/modules/mirrored-images-unable-pull-rhocp.adoc @@ -0,0 +1,20 @@ +:_content-type: CONCEPT +[id="mirrored-images-unable-pull-rhocp"] += Unable to pull mirrored images to {productname} on {ocp} + +After mirroring images into the {productname} registry on {ocp} using the `oc adm catalog` mirror command, you might receive the following error when attempting to use that mirrored image: `Failed to pull image "//:": rpc error: code = Unknown desc = reading manifest 1-191a in //:: unauthorized: access to the requested resource is not authorized`. This occurs when images are pushed to the {productname} registry without an existing repository. When this happens, a new, private, registry is created automatically. This restrains the kubelet on {ocp} nodes from pulling images and deploying the pod successfully. + +As a workaround to this issue, you can set the `CREATE_PRIVATE_REPO_ON_PUSH` to `false` in your `config.yaml` file. For example: + +[source,yaml] +---- +CREATE_PRIVATE_REPO_ON_PUSH: false +---- + +This helps create a public repository when you first push the image to the {productname} registry when using the `oc adm catalog mirror` command. 
+ + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6966410[Images mirrored to Quay can't be pulled in RHOCP]. \ No newline at end of file diff --git a/modules/mirroring-api-intro.adoc b/modules/mirroring-api-intro.adoc index 6c9200be1..8fb67cf34 100644 --- a/modules/mirroring-api-intro.adoc +++ b/modules/mirroring-api-intro.adoc @@ -7,4 +7,4 @@ You can use the {productname} API to configure repository mirroring: .Mirroring API image:swagger-mirroring.png[Mirroring API] -More information is available in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_api_guide/index[{productname} API Guide] +More information is available in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API Guide] diff --git a/modules/mirroring-creating-repo.adoc b/modules/mirroring-creating-repo.adoc index 4fa487eb6..734e73113 100644 --- a/modules/mirroring-creating-repo.adoc +++ b/modules/mirroring-creating-repo.adoc @@ -1,15 +1,23 @@ -[[mirroring-creating-repo]] +:_content-type: PROCEDURE +[id="mirroring-creating-repo"] = Creating a mirrored repository -The steps shown in this section assume you already have enabled repository mirroring in the configuration for your {productname} cluster and that you have a deployed a mirroring worker. - -When mirroring a repository from an external container registry, create a new private repository. Typically the same name is used as the target repository, for example, `quay-rhel8`: +When mirroring a repository from an external container registry, you must create a new private repository. Typically, the same name is used as the target repository, for example, `quay-rhel8`. 
image:repo_quay_rhel8.png[Create new {productname} repo] - +[id="mirroring-repository-mirroring-settings"] == Repository mirroring settings +Use the following procedure to adjust the settings of your mirrored repository. + +.Prerequisites + +* You have enabled repository mirroring in your {productname} configuration file. +* You have deployed a mirroring worker. + +.Procedure + . In the Settings tab, set the Repository State to `Mirror`: + image:repo_mirror_create.png[Create a new {productname} repo mirror] @@ -30,17 +38,25 @@ image:repo-mirror-details-start.png[Repository mirroring] * **Password:** The password associated with the Username. Note that the password cannot include characters that require an escape character (\). +[id="mirroring-advanced-settings"] == Advanced settings -* In the Advanced Settings section, configure TLS and proxy, if required: +In the *Advanced Settings* section, you can configure SSL/TLS and proxy with the following options: + +* **Verify TLS:** Select this option if you want to require HTTPS and to verify certificates when communicating with the target remote registry. +* **Accept Unsigned Images:** Selecting this option allows unsigned images to be mirrored. +* **HTTP Proxy:** Identify the HTTP proxy server needed to access the remote site, if a proxy server is needed. +* **HTTPS Proxy:** Identify the HTTPS proxy server needed to access the remote site, if a proxy server is needed. +* **No Proxy:** List of locations that do not require proxy. -* **Verify TLS:** Check this box if you want to require HTTPS and to verify certificates, when communicating with the target remote registry. -* **HTTP Proxy:** Identify the HTTP proxy server needed to access the remote site, if one is required. -* **HTTPS Proxy:** Identify the HTTPS proxy server needed to access the remote site, if one is required. 
-* **No Proxy:** List of locations that do not require proxy +[id="mirroring-synchronize-now"] == Synchronize now +Use the following procedure to initiate the mirroring operation. + +.Procedure + * To perform an immediate mirroring operation, press the Sync Now button on the repository's Mirroring tab. The logs are available on the Usage Logs tab: + image:repo-mirror-usage-logs.png[Usage logs] @@ -49,6 +65,6 @@ When the mirroring is complete, the images will appear in the Tags tab: + image:repo-mirror-tags.png[Repository mirroring tags] + -Below is an example of a completed Repository Mirroring screen: +Below is an example of a completed Repository Mirroring screen: + image:repo-mirror-details.png[Repository mirroring details] diff --git a/modules/mirroring-invalid-credentials.adoc b/modules/mirroring-invalid-credentials.adoc new file mode 100644 index 000000000..8717b6a16 --- /dev/null +++ b/modules/mirroring-invalid-credentials.adoc @@ -0,0 +1,12 @@ +:_content-type: PROCEDURE +[id="mirroring-invalid-credentials"] += Invalid credentials when mirroring + +In some cases, {productname} mirroring might fail and return the following error: `repomirrorworker stdout | time="2022-11-03T16:46:11Z" level=debug msg="Accessing \"registry.redhat.io/rhel8/nginx-118:1\" failed: unable to retrieve auth token: invalid username/password: unauthorized:` when the {productname} cluster is missing a connection to the LoadBalancer. Consequently, {productname} is unable to connect to the network. + +To resolve this issue, ensure that your {productname} cluster has a stable connection to the LoadBalancer. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6989386[Quay Mirroring fail with invalid credentials]. 
\ No newline at end of file diff --git a/modules/missing-runc-files.adoc b/modules/missing-runc-files.adoc new file mode 100644 index 000000000..b0a378503 --- /dev/null +++ b/modules/missing-runc-files.adoc @@ -0,0 +1,22 @@ +:_content-type: PROCEDURE +[id="missing-runc-files"] += Missing runc files prevent containers from running + +When attempting to start containers using the Podman client tool, users encounter an error due to missing runc files. The error message indicates a permission issue related to adding a seccomp filter rule for the `bdflush` syscall, leading to the container startup failure. + +The cause of this issue is the absence of required `runc` files in the older version of Podman. These missing files prevent the proper execution of containers, resulting in the encountered error. Updating Podman ensures that the necessary runc files are present, enabling the successful deployment of containers. + +To resolve this issue, it is recommended to update the Podman version to obtain the updated `runc` files. By updating Podman, the missing runc files will be installed, allowing containers to be deployed successfully. + +Use the following command to update Podman: +[source,terminal] +---- +# yum update podman -y +---- + +After updating Podman, restart the containers, and the error caused by missing runc files should no longer occur. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/6981027[Quay containers can't run due to missing runc files]. 
\ No newline at end of file diff --git a/modules/monitoring-single-namespace.adoc b/modules/monitoring-single-namespace.adoc index 4df04b386..79945a1b7 100644 --- a/modules/monitoring-single-namespace.adoc +++ b/modules/monitoring-single-namespace.adoc @@ -1,26 +1,43 @@ -[[monitoring-single-namespace]] -= Enabling monitoring when Operator is installed in a single namespace +:_content-type: PROCEDURE +[id="monitoring-single-namespace"] += Enabling monitoring when the {productname} Operator is installed in a single namespace -When {productname} Operator is installed in a single namespace, the monitoring component is unmanaged. To configure monitoring, you need to enable it for user-defined namespaces in OpenShift Container Platform. For more information, see the OCP documentation for link:https://docs.openshift.com/container-platform/4.7/monitoring/configuring-the-monitoring-stack.html[Configuring the monitoring stack] and link:https://docs.openshift.com/container-platform/4.7/monitoring/enabling-monitoring-for-user-defined-projects.html[Enabling monitoring for user-defined projects]. +[NOTE] +==== +Currently, enabling monitoring when the {productname} Operator is installed in a single namespace is not supported on IBM Power and IBM Z. +==== -The following steps show you how to configure monitoring for Quay, based on the OCP documentation. +When the {productname} Operator is installed in a single namespace, the monitoring component is set to `unmanaged`. To configure monitoring, you must enable it for user-defined namespaces in {ocp}. +For more information, see the {ocp} documentation for link:https://docs.openshift.com/container-platform/{ocp-y}/monitoring/configuring-the-monitoring-stack.html[Configuring the monitoring stack] and link:https://docs.openshift.com/container-platform/{ocp-y}/monitoring/enabling-monitoring-for-user-defined-projects.html[Enabling monitoring for user-defined projects]. 
+ +The following sections shows you how to enable monitoring for {productname} based on the {ocp} documentation. + +[id="creating-cluster-monitoring-config-map"] == Creating a cluster monitoring config map -. Check whether the `cluster-monitoring-config` ConfigMap object exists: +Use the following procedure check if the `cluster-monitoring-config` `ConfigMap` object exists. + +.Procedure + +. Enter the following command to check whether the `cluster-monitoring-config` ConfigMap object exists: + -``` +[source,terminal] +---- $ oc -n openshift-monitoring get configmap cluster-monitoring-config - +---- ++ +.Example output ++ +[source,terminal] +---- Error from server (NotFound): configmaps "cluster-monitoring-config" not found -``` +---- -. If the ConfigMap object does not exist:  -.. Create the following YAML manifest. In this example, the file is called `cluster-monitoring-config.yaml`: +. Optional: If the `ConfigMap` object does not exist, create a YAML manifest. In the following example, the file is called `cluster-monitoring-config.yaml`. + +[source,terminal] ---- -$ cat cluster-monitoring-config.yaml - apiVersion: v1 kind: ConfigMap metadata: @@ -29,37 +46,60 @@ metadata: data: config.yaml: | ---- -.. Create the ConfigMap object: + +. Optional: If the `ConfigMap` object does not exist, create the `ConfigMap` object: + +[source,terminal] ---- -$ oc apply -f cluster-monitoring-config.yaml configmap/cluster-monitoring-config created +$ oc apply -f cluster-monitoring-config.yaml ---- + +.Example output ++ +[source,terminal] +---- +configmap/cluster-monitoring-config created ---- -$ oc -n openshift-monitoring get configmap cluster-monitoring-config +. 
Ensure that the `ConfigMap` object exists by running the following command: ++ +[source,terminal] +---- +$ oc -n openshift-monitoring get configmap cluster-monitoring-config +---- ++ +.Example output ++ +[source,terminal] +---- NAME DATA AGE cluster-monitoring-config 1 12s ---- +[id="creating-user-defined-workload-monitoring-config-map"] +== Creating a user-defined workload monitoring ConfigMap object +Use the following procedure check if the `user-workload-monitoring-config` `ConfigMap` object exists. -== Creating a user-defined workload monitoring config map +.Procedure -. Check whether the `user-workload-monitoring-config` ConfigMap object exists: +. Enter the following command to check whether the `user-workload-monitoring-config` `ConfigMap` object exists: + ---- $ oc -n openshift-user-workload-monitoring get configmap user-workload-monitoring-config - +---- ++ +.Example output ++ +[source,terminal] +---- Error from server (NotFound): configmaps "user-workload-monitoring-config" not found ---- -. If the ConfigMap object does not exist: -.. Create the following YAML manifest. In this example, the file is called `user-workload-monitoring-config.yaml`: +. If the `ConfigMap` object does not exist, create a YAML manifest. In the following example, the file is called `user-workload-monitoring-config.yaml`. + +[source,terminal] ---- -$ cat user-workload-monitoring-config.yaml - apiVersion: v1 kind: ConfigMap metadata: @@ -68,34 +108,49 @@ metadata: data: config.yaml: | ---- -.. Create the ConfigMap object: + +. Optional: Create the `ConfigMap` object by entering the following command: + +[source,terminal] ---- $ oc apply -f user-workload-monitoring-config.yaml - +---- ++ +.Example output ++ +[source,terminal] +---- configmap/user-workload-monitoring-config created ---- +[id="enabling-monitoring-user-defined-projects"] +== Enable monitoring for user-defined projects +Use the following procedure to enable monitoring for user-defined projects. 
+.Procedure -== Enable monitoring for user-defined projects - -. Check whether monitoring for user-defined projects is running: +. Enter the following command to check if monitoring for user-defined projects is running: + +[source,terminal] ---- $ oc get pods -n openshift-user-workload-monitoring - +---- ++ +.Example output ++ +[source,terminal] +---- No resources found in openshift-user-workload-monitoring namespace. ---- -. Edit the  `cluster-monitoring-config` ConfigMap: +. Edit the `cluster-monitoring-config` `ConfigMap` by entering the following command: + ---- $ oc -n openshift-monitoring edit configmap cluster-monitoring-config ---- -  -. Set `enableUserWorkload: true` to enable monitoring for user-defined projects on the cluster: + +. Set `enableUserWorkload: true` in your `config.yaml` file to enable monitoring for user-defined projects on the cluster: + [source,yaml] ---- @@ -107,11 +162,17 @@ kind: ConfigMap metadata: annotations: ---- -. Save the file to apply the changes and then check that the appropriate pods are running: + +. Enter the following command to save the file, apply the changes, and ensure that the appropriate pods are running: + ---- $ oc get pods -n openshift-user-workload-monitoring - +---- ++ +.Example output ++ +[source,terminal] +---- NAME READY STATUS RESTARTS AGE prometheus-operator-6f96b4b8f8-gq6rl 2/2 Running 0 15s prometheus-user-workload-0 5/5 Running 1 12s @@ -119,14 +180,18 @@ prometheus-user-workload-1 5/5 Running 1 12s thanos-ruler-user-workload-0 3/3 Running 0 8s thanos-ruler-user-workload-1 3/3 Running 0 8s ---- -  -== Create a Service object to expose Quay metrics +[id="creating-service-object-expose-quay-metrics"] +== Creating a Service object to expose {productname} metrics + +Use the following procedure to create a `Service` object to expose {productname} metrics. + +.Procedure . 
Create a YAML file for the Service object: + ---- -$ cat quay-service.yaml +$ cat <<EOF > quay-service.yaml apiVersion: v1 kind: Service @@ -147,26 +212,34 @@ spec: quay-component: quay-app quay-operator/quayregistry: example-registry type: ClusterIP +EOF ---- -  -  -. Create the Service object: + +. Create the `Service` object by entering the following command: + +[source,terminal] ---- $ oc apply -f quay-service.yaml - +---- ++ +.Example output ++ +[source,terminal] +---- service/example-registry-quay-metrics created ---- -== Create a ServiceMonitor object +[id="creating-servicemonitor-object"] +== Creating a ServiceMonitor object -Configure OpenShift Monitoring to scrape the metrics by creating a ServiceMonitor resource. +Use the following procedure to configure OpenShift Monitoring to scrape the metrics by creating a `ServiceMonitor` resource. +.Procedure -. Create a YAML file for the ServiceMonitor resource: +. Create a YAML file for the `ServiceMonitor` resource: + ---- -$ cat quay-service-monitor.yaml +$ cat <<EOF > quay-service-monitor.yaml apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -178,28 +251,34 @@ metadata: spec: endpoints: - port: quay-metrics - namespaceSelector: - any: true + namespaceSelector: + any: true selector: matchLabels: quay-component: monitoring +EOF ---- -. Create the ServiceMonitor: +. Create the `ServiceMonitor` resource by entering the following command: + ---- -$ oc apply -f quay-service-monitor.yaml - +$ oc apply -f quay-service-monitor.yaml +---- ++ +.Example output ++ +[source,terminal] +---- servicemonitor.monitoring.coreos.com/example-registry-quay-metrics-monitor created ---- -== View the metrics in OpenShift +[id="view-metrics-in-ocp"] +== Viewing metrics in {ocp} -You can access the metrics in the OpenShift console under Monitoring -> Metrics. In the Expression field, enter the text `quay_` to see the list of metrics available: +You can access the metrics in the {ocp} console under *Monitoring* -> *Metrics*. 
In the Expression field, enter *quay_* to see the list of metrics available: image:metrics-single-namespace.png[Quay metrics] - -For example, if you have added users to your registry, select the `quay-users_rows` metric: +For example, if you have added users to your registry, select the *quay-users_rows* metric: image:metrics-single-namespace-users.png[Quay metrics] diff --git a/modules/moving-a-tag.adoc b/modules/moving-a-tag.adoc new file mode 100644 index 000000000..d557b0fd8 --- /dev/null +++ b/modules/moving-a-tag.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[id="moving-a-tag"] += Moving an image tag + +You can move a tag to a different image if desired. + +.Procedure + +* Click the *Settings*, or _gear_, icon next to the tag and click *Add New Tag* and enter an existing tag name. +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +confirms that you want the tag moved instead of added. \ No newline at end of file diff --git a/modules/namespace-auto-pruning-arch.adoc b/modules/namespace-auto-pruning-arch.adoc new file mode 100644 index 000000000..989bf96ef --- /dev/null +++ b/modules/namespace-auto-pruning-arch.adoc @@ -0,0 +1,76 @@ +:_content-type: CONCEPT +[id="namespace-auto-pruning-arch"] += Namespace auto-pruning architecture + +For the namespace auto-pruning feature, two distinct database tables within a database schema were created: one for `namespaceautoprunepolicy` and another for `autoprunetaskstatus`. An auto-prune worker carries out the configured policies. + +[discrete] +[id="namespaceautoprunepolicy-database-table"] +== Namespace auto prune policy database table + +The `namespaceautoprunepolicy` database table holds the policy configuration for a single namespace. There is only one entry per namespace, but there is support for multiple rows per `namespace_id`. 
The `policy` field holds the policy details, such as `{method: "creation_date", olderThan: "2w"}` or `{method: "number_of_tags", numTags: 100}`. + +.`namespaceautoprunepolicy` database table +[cols="1a,1a,1a,1a",options="header"] +|=== +| Field | Type |Attributes | Description + +| `uuid` | character varying (225) | Unique, indexed | Unique identifier for this policy + +| `namespace_id` | Integer | Foreign Key |Namespace that the policy falls under + +| `policy` | text | JSON | Policy configuration +|=== + +[discrete] +[id="autoprunetaskstatus-database-table"] +== Auto-prune task status database table + +The `autoprunetaskstatus` table registers tasks to be executed by the auto-prune worker. Tasks are executed within the context of a single namespace. Only one task per namespace exists. + +.`autoprunetaskstatus` database table +[cols="1a,1a,1a,1a",options="header"] + +|=== +| Field | Type |Attributes | Description +| `namespace_id` | Integer | Foreign Key | Namespace that this task belongs to + +| `last_ran_ms` | Big Integer (bigint) | Nullable, indexed | Last time that the worker executed the policies for this namespace + +| `status` | text | Nullable | Details from the last execution task +|=== + +[id="auto-prune-worker"] +== Auto-prune worker + +The following sections detail information about the auto-prune worker. + +[id="auto-prune-task-creation"] +=== Auto-prune-task-creation + +When a new policy is created in the `namespaceautoprunepolicy` database table, a row is also created in the `autoprunetask` table. This is done in the same transaction. The auto-prune worker uses the entry in the `autoprunetask` table to identify which namespace it should execute policies for. + +[id="auto-prune-worker-execution"] +=== Auto-prune worker execution + +The auto-pruning worker is an asynchronous job that executes configured policies. Its workflow is based on values in the `autoprunetask` table. 
When a task begins, the following occurs: + +* The auto-prune worker starts on a set interval, which defaults to 30 seconds. +* The auto-prune worker selects a row from `autoprunetask` with the least, or null, `last_ran_ms` and `FOR UPDATE SKIP LOCKED`. +** A null `last_ran_ms` indicates that the task was never run. +** A task that has not been run in the longest amount of time, or has never been run at all, is prioritized. + +* The auto-prune worker obtains the policy configuration from the `namespaceautoprunepolicy` table. +** If no policy configuration exists, the entry from `autoprunetask` is deleted for this namespace and the procedure stops immediately. + +* The auto-prune worker begins a paginated loop of all repositories under the organization. +** The auto-prune worker determines which pruning method to use based on `policy.method`. +* The auto-prune worker executes the pruning method with the policy configuration retrieved earlier. +** For pruning by the number of tags: the auto-prune worker gets the number of currently active tags sorted by creation date, and deletes the older tags to the configured number. +** For pruning by date: the auto-prune worker gets the active tags older than the specified time span and any tags returned are deleted. + +* The auto-prune worker adds audit logs of the tags deleted. + +* The `last_ran_ms` gets updated after a row from `autoprunetask` is selected. + +* The auto-prune worker ends. diff --git a/modules/nested-ldap-team-sync.adoc b/modules/nested-ldap-team-sync.adoc new file mode 100644 index 000000000..53a94ed4a --- /dev/null +++ b/modules/nested-ldap-team-sync.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="nested-ldap-team-sync"] += Does {productname} support nested LDAP groups for team synchronization? + +{productname} does not currently support nested LDAP team synchronization. 
As a temporary workaround, you can manually maintain {productname} team membership without using the `directory synchronization` feature, and perform regular syncs of your {productname} team with LDAP group members using cronjobs. \ No newline at end of file diff --git a/modules/notification-actions.adoc b/modules/notification-actions.adoc new file mode 100644 index 000000000..3f97adbda --- /dev/null +++ b/modules/notification-actions.adoc @@ -0,0 +1,72 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="notification-actions"] += Notification actions + +Notifications are added to the *Events and Notifications* section of the *Repository Settings* page. They are also added to the *Notifications* window, which can be found by clicking the _bell_ icon in the navigation pane of +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +notifications can be setup to be sent to a _User_, _Team_, or the entire _organization_. + +Notifications can be delivered by one of the following methods. + +[discrete] +[id="e-mail"] +== *E-mail notifications* + +E-mails are sent to specified addresses that describe the specified event. E-mail addresses must be verified on a _per-repository_ basis. + +[discrete] +[id="webhook-post"] +== *Webhook POST notifications* + +An HTTP `POST` call is made to the specified URL with the event's data. For more information about event data, see "Repository events description". + +When the URL is HTTPS, the call has an SSL client certificate set from +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. 
+endif::[] +Verification of this certificate proves that the call originated from +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +Responses with the status code in the `2xx` range are considered successful. Responses with any other status code are considered failures and result in a retry of the webhook notification. + +[discrete] +[id="flowdock-notification"] +== *Flowdock notifications* + +Posts a message to Flowdock. + +[discrete] +[id="hipchat-notification"] +== *Hipchat notifications* + +Posts a message to HipChat. + +[discrete] +[id="slack-notification"] +== *Slack notifications* + +Posts a message to Slack. \ No newline at end of file diff --git a/modules/oauth2-access-tokens.adoc b/modules/oauth2-access-tokens.adoc new file mode 100644 index 000000000..625e4dfa8 --- /dev/null +++ b/modules/oauth2-access-tokens.adoc @@ -0,0 +1,43 @@ +:_content-type: REFERENCE +[id="oauth2-access-tokens"] += OAuth 2 access tokens + +link:https://oauth.net/2/[OAuth 2] access tokens (considered "API tokens" for {productname}) enable user-authenticated access to the {productname} API, suitable for applications that require user identity verification. These tokens are obtained through an OAuth 2 authorization process, where a {productname} administrator generates a token on behalf of themselves or another user to access {productname} API endpoints. OAuth 2 tokens authorize actions on API endpoints based on the scopes defined for the token. + +[NOTE] +==== +Although OAuth 2 tokens authorize actions on API endpoints based on the scopes defined for the token, access to the resources themselves is governed by {productname}'s role-based access control (RBAC) mechanisms. Actions can be created on a resource, for example, a repository, provided that you have the proper role (*Admin* or *Creator*) to do so for that namespace. This is true even if the API token was granted the `repo:admin` scope. 
+==== + +OAuth 2 access tokens can only be created by using the {productname} UI; there is no way to create an OAuth 2 access token by using the CLI. When creating an OAuth 2 token, the following options can be selected for a token holder: + +* *Administer Organization*. When selected, allows the user to be able to administer organizations, including creating robots, creating teams, adjusting team membership, and changing billing settings. + +* *Administer Repositories*. When selected, provides the user administrator access to all repositories to which the granting user has access. + +* *Create Repositories*. When selected, provides the user the ability to create repositories in any namespaces that the granting user is allowed to create repositories. + +* *View all visible repositories*. When selected, provides the user the ability to view and pull all repositories visible to the granting user. + +* *Read/Write to any accessible repositories*. When selected, provides the user the ability to view, push and pull to all repositories to which the granting user has write access. + +* *Super User Access*. When selected, provides the user the ability to administer your installation including managing users, managing organizations and other features found in the superuser panel. + +* *Administer User*. When selected, provides the user the ability to administer your account including creating robots and granting them permissions to your repositories. + +* *Read User Information*. When selected, provides the user the ability to read user information such as username and email address. + +Token distributors should be mindful of the permissions that they are granting when generating a token on behalf of a user, and should have absolute trust in a user before granting such permissions as *Administer organization*, *Super User Access*, and *Administer User*. 
Additionally, the access token is only revealed at the time of creation; they cannot be listed from the CLI, nor can they be found on the {productname} UI. If an access token is lost or forgotten, a new token must be created; a token cannot be recovered. + +OAuth 2 access tokens are passed as a `Bearer` token in the `Authorization` header of an API call and, as a result, are used to provide authentication and authorization to the defined API endpoint, such as an image tag, a repository, an organization, and so on. + +The API is available from the `/api/v1` endpoint of your {productname} host. For example, `\https:///api/v1`. It allows users to connect to endpoints through their browser to `GET`, `POST`, `DELETE`, and `PUT` {productname} settings by enabling the Swagger UI. The API can be accessed by applications that make API calls and use OAuth tokens, and it sends and receives data as JSON. + +With {productname}, there is currently no way to rotate or to set an expiration time on an OAuth 2 access token, and the token lifespan is 10 years. Tokens can be deleted by deleting the applications in which they were created in the event that they are compromised, however, this deletes all tokens that were made within that specific application. + +[NOTE] +==== +In practice, {productname} administrators _could_ create a new OAuth application on the *Applications* page of their organization each time they wanted to create a new OAuth token for a user. This would ensure that a single application is not responsible for all OAuth tokens. As a result, in the event that a user's token is compromised, the administrator would delete the application of the compromised token. This would help avoid disruption for other users whose tokens might be part of the same application. +==== + +The following sections shows you how to generate and reassign an OAuth 2 access token. 
\ No newline at end of file diff --git a/modules/obtaining-quay-config-information.adoc b/modules/obtaining-quay-config-information.adoc new file mode 100644 index 000000000..844baddea --- /dev/null +++ b/modules/obtaining-quay-config-information.adoc @@ -0,0 +1,159 @@ +:_content-type: PROCEDURE +[id="obtaining-quay-config-information"] += Configuration information for {productname} + +Checking a configuration YAML can help identify and resolve various issues related to the configuration of {productname}. Checking the configuration YAML can help you address the following issues: + +* *Incorrect Configuration Parameters*: If the database is not functioning as expected or is experiencing performance issues, your configuration parameters could be at fault. By checking the configuration YAML, administrators can ensure that all the required parameters are set correctly and match the intended settings for the database. + +* *Resource Limitations*: The configuration YAML might specify resource limits for the database, such as memory and CPU limits. If the database is running into resource constraints or experiencing contention with other services, adjusting these limits can help optimize resource allocation and improve overall performance. + +* *Connectivity Issues*: Incorrect network settings in the configuration YAML can lead to connectivity problems between the application and the database. Ensuring that the correct network configurations are in place can resolve issues related to connectivity and communication. + +* *Data Storage and Paths*: The configuration YAML may include paths for storing data and logs. If the paths are misconfigured or inaccessible, the database may encounter errors while reading or writing data, leading to operational issues. + +* *Authentication and Security*: The configuration YAML may contain authentication settings, including usernames, passwords, and access controls. 
Verifying these settings is crucial for maintaining the security of the database and ensuring only authorized users have access. + +* *Plugin and Extension Settings*: Some databases support extensions or plugins that enhance functionality. Issues may arise if these plugins are misconfigured or not loaded correctly. Checking the configuration YAML can help identify any problems with plugin settings. + +* *Replication and High Availability Settings*: In clustered or replicated database setups, the configuration YAML may define replication settings and high availability configurations. Incorrect settings can lead to data inconsistency and system instability. + +* *Backup and Recovery Options*: The configuration YAML might include backup and recovery options, specifying how data backups are performed and how data can be recovered in case of failures. Validating these settings can ensure data safety and successful recovery processes. + +By checking your configuration YAML, {productname} administrators can detect and resolve these issues before they cause significant disruptions to the application or service relying on the database. + +[id="obtaining-configuration-information-quay"] +== Obtaining configuration information for {productname} + +Configuration information can be obtained for all types of {productname} deployments, include standalone, Operator, and geo-replication deployments. Obtaining configuration information can help you resolve issues with authentication and authorization, your database, object storage, and repository mirroring. After you have obtained the necessary configuration information, you can update your `config.yaml` file, search the link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] for a solution, or file a support ticket with the Red Hat Support team. + +.Procedure + +. To obtain configuration information on {productname} Operator deployments, you can use `oc exec`, `oc cp`, or `oc rsync`. + +.. 
To use the `oc exec` command, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it -- cat /conf/stack/config.yaml +---- ++ +This command returns your `config.yaml` file directly to your terminal. + +.. To use the `oc copy` command, enter the following commands: ++ +[source,terminal] +---- +$ oc cp :/conf/stack/config.yaml /tmp/config.yaml +---- ++ +To display this information in your terminal, enter the following command: ++ +[source,terminal] +---- +$ cat /tmp/config.yaml +---- + +.. To use the `oc rsync` command, enter the following commands: ++ +[source,terminal] +---- +oc rsync :/conf/stack/ /tmp/local_directory/ +---- ++ +To display this information in your terminal, enter the following command: ++ +[source,terminal] +---- +$ cat /tmp/local_directory/config.yaml +---- ++ +.Example output ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: +local_us: +- RHOCSStorage +- access_key: redacted + bucket_name: lht-quay-datastore-68fff7b8-1b5e-46aa-8110-c4b7ead781f5 + hostname: s3.openshift-storage.svc.cluster.local + is_secure: true + port: 443 + secret_key: redacted + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- local_us +DISTRIBUTED_STORAGE_PREFERENCE: +- local_us +---- + +. To obtain configuration information on standalone {productname} deployments, you can use `podman cp` or `podman exec`. + +.. To use the `podman copy` command, enter the following commands: ++ +[source,terminal] +---- +$ podman cp :/conf/stack/config.yaml /tmp/local_directory/ +---- ++ +To display this information in your terminal, enter the following command: ++ +[source,terminal] +---- +$ cat /tmp/local_directory/config.yaml +---- + +.. 
To use `podman exec`, enter the following commands: ++ +[source,terminal] +---- +$ podman exec -it cat /conf/stack/config.yaml +---- ++ +.Example output ++ +[source,yaml] +---- +BROWSER_API_CALLS_XHR_ONLY: false +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.oci.image.layer.v1.tar+zstd + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +AUTHENTICATION_TYPE: Database +AVATAR_KIND: local +BUILDLOGS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 +DATABASE_SECRET_KEY: 05ee6382-24a6-43c0-b30f-849c8a0f7260 +DB_CONNECTION_ARGS: {} +--- +---- + +[id="obtaining-db-config-info"] +== Obtaining database configuration information + +You can obtain configuration information about your database by using the following procedure. + +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== + +.Procedure + +* If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it -- cat /var/lib/pgsql/data/userdata/postgresql.conf +---- + +* If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it cat /var/lib/pgsql/data/userdata/postgresql.conf +---- diff --git a/modules/obtaining-quay-logs.adoc b/modules/obtaining-quay-logs.adoc new file mode 100644 index 000000000..3d9edddee --- /dev/null +++ b/modules/obtaining-quay-logs.adoc @@ -0,0 +1,106 @@ +:_content-type: PROCEDURE +[id="obtaining-quay-logs"] += Logging information for {productname} + +Obtaining log information can be beneficial in various ways for managing, monitoring, and troubleshooting applications running in containers or pods. 
Some of the reasons why obtaining log information is valuable include the following: + +* *Debugging and Troubleshooting*: Logs provide insights into what's happening inside the application, allowing developers and system administrators to identify and resolve issues. By analyzing log messages, one can identify errors, exceptions, warnings, or unexpected behavior that might occur during the application's execution. + +* *Performance Monitoring*: Monitoring logs helps to track the performance of the application and its components. Monitoring metrics like response times, request rates, and resource utilization can help in optimizing and scaling the application to meet the demand. + +* *Security Analysis*: Logs can be essential in auditing and detecting potential security breaches. By analyzing logs, suspicious activities, unauthorized access attempts, or any abnormal behavior can be identified, helping in detecting and responding to security threats. + +* *Tracking User Behavior*: In some cases, logs can be used to track user activities and behavior. This is particularly important for applications that handle sensitive data, where tracking user actions can be useful for auditing and compliance purposes. + +* *Capacity Planning*: Log data can be used to understand resource utilization patterns, which can aid in capacity planning. By analyzing logs, one can identify peak usage periods, anticipate resource needs, and optimize infrastructure accordingly. + +* *Error Analysis*: When errors occur, logs can provide valuable context about what happened leading up to the error. This can help in understanding the root cause of the issue and facilitating the debugging process. + +* *Verification of Deployment*: Logging during the deployment process can help verify if the application is starting correctly and if all components are functioning as expected. 
+ +* *Continuous Integration/Continuous Deployment (CI/CD)*: In CI/CD pipelines, logging is essential to capture build and deployment statuses, allowing teams to monitor the success or failure of each stage. + +[id="obtaining-log-information-quay"] +== Obtaining log information for {productname} + +Log information can be obtained for all types of {productname} deployments, including geo-replication deployments, standalone deployments, and Operator deployments. Log information can also be obtained for mirrored repositories. It can help you troubleshoot authentication and authorization issues, and object storage issues. After you have obtained the necessary log information, you can search the link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] for a solution, or file a support ticket with the Red Hat Support team. + +Use the following procedure to obtain logs for your {productname} deployment. + +.Procedure + +* If you are using the {productname} Operator on {ocp}, enter the following command to view the logs: ++ +[source,terminal] +---- +$ oc logs +---- + +* If you are on a standalone {productname} deployment, enter the following command: ++ +[source,terminal] +---- +$ podman logs +---- ++ +.Example output ++ +[source,terminal] +---- +... +gunicorn-web stdout | 2023-01-20 15:41:52,071 [205] [DEBUG] [app] Starting request: urn:request:0d88de25-03b0-4cf9-b8bc-87f1ac099429 (/oauth2/azure/callback) {'X-Forwarded-For': '174.91.79.124'} +... +---- + +[id="obtaining-verbose-container-pod-logs"] +== Examining verbose logs + +{productname} does not have verbose logs, however, with the following procedures, you can obtain a detailed status check of your database pod or container. + +[NOTE] +==== +Additional debugging information can be returned if you have deployed {productname} in one of the following ways: + +* You have deployed {productname} by passing in the `DEBUGLOG=true` variable. 
+* You have deployed {productname} with LDAP authentication enabled by passing in the `DEBUGLOG=true` and `USERS_DEBUG=1` variables. +* You have configured {productname-ocp} by updating the `QuayRegistry` resource to include `DEBUGLOG=true`. + +For more information, see "Running {productname} in debug mode". +==== +.Procedure + +. Enter the following commands to examine verbose database logs. + +.. If you are using the {productname} Operator on {ocp}, enter the following commands: ++ +[source,terminal] +---- +$ oc logs --previous +---- ++ +[source,terminal] +---- +$ oc logs --previous -c +---- ++ +[source,terminal] +---- +$ oc cp :/var/lib/pgsql/data/userdata/log/* /path/to/desired_directory_on_host +---- + +.. If you are using a standalone deployment of {productname}, enter the following commands: ++ +[source,terminal] +---- +$ podman logs --previous +---- ++ +[source,terminal] +---- +$ podman logs --previous -c +---- ++ +[source,terminal] +---- +$ podman cp :/var/lib/pgsql/data/userdata/log/* /path/to/desired_directory_on_host +---- \ No newline at end of file diff --git a/modules/oci-intro.adoc b/modules/oci-intro.adoc index 31140f71e..e87ab057d 100644 --- a/modules/oci-intro.adoc +++ b/modules/oci-intro.adoc @@ -1,10 +1,53 @@ -[[oci-intro]] -= OCI Support and {productname} +:_content-type: CONCEPT +[id="oci-intro"] += Open Container Initiative support -Container registries such as {productname} were originally designed to support container images in the Docker image format. To promote the use of additional runtimes apart from Docker, the Open Container Initiative (OCI) was created to provide a standardization surrounding container runtimes and image formats. Most container registries support the OCI standardization as it is based on the link:https://docs.docker.com/registry/spec/manifest-v2-2/[Docker image manifest V2, Schema 2] format. +Container registries were originally designed to support container images in the Docker image format. 
To promote the use of additional runtimes apart from Docker, the Open Container Initiative (OCI) was created to provide a standardization surrounding container runtimes and image formats. Most container registries support the OCI standardization as it is based on the link:https://docs.docker.com/registry/spec/manifest-v2-2/[Docker image manifest V2, Schema 2] format. -In addition to container images, a variety of artifacts have emerged that support not just individual applications, but the Kubernetes platform as a whole. These range from Open Policy Agent (OPA) policies for security and governance to Helm charts and Operators to aid in application deployment. +In addition to container images, a variety of artifacts have emerged that support not just individual applications, but also the Kubernetes platform as a whole. These range from Open Policy Agent (OPA) policies for security and governance to Helm charts and Operators that aid in application deployment. -{productname} is a private container registry that not only stores container images, but supports an entire ecosystem of tooling to aid in the management of containers. Support for OCI based artifacts in {productname} 3.6 has extended from solely Helm to include cosign and ztsd compression schemes by default. As such, `FEATURE_HELM_OCI_SUPPORT` has been deprecated. +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +is a private container registry that not only stores container images, but also supports an entire ecosystem of tooling to aid in the management of containers. 
+ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +strives to be as compatible as possible with the link:https://opencontainers.org/posts/blog/2024-03-13-image-and-distribution-1-1/[OCI 1.1 _Image and Distribution specifications_], and supports common media types like _Helm charts_ (as long as they pushed with a version of Helm that supports OCI) and a variety of arbitrary media types within the manifest or layer components of container images. +ifeval::["{context}" == "quay-io"] +Support for OCI media types differs from previous iterations of {quayio}, when the registry was more strict about accepted media types. Because {quayio} now works with a wider array of media types, including those that were previously outside the scope of its support, it is now more versatile accommodating not only standard container image formats but also emerging or unconventional types. +endif::[] +ifeval::["{context}" == "use-quay"] +Support for OCI media types differs from previous iterations of {productname}, when the registry was more strict about accepted media types. Because {productname} now works with a wider array of media types, including those that were previously outside the scope of its support, it is now more versatile accommodating not only standard container image formats but also emerging or unconventional types. +endif::[] -When {productname} 3.6 is deployed using the OpenShift Operator, support for Helm and OCI artifacts is now enabled by default under the `FEATURE_GENERAL_OCI_SUPPORT` configuration. If you need to explicitly enable the feature, for example, if it has previously been disabled or if you have upgraded from a version where it is not enabled by default, see the section xref:config-fields-helm-oci[Explicitly enabling OCI and Helm support]. 
+In addition to its expanded support for novel media types, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +ensures compatibility with Docker images, including V2_2 and V2_1 formats. This compatibility with Docker V2_2 and V2_1 images demonstrates +ifeval::["{context}" == "quay-io"] +{quayio}'s +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}'s +endif::[] +commitment to providing a seamless experience for Docker users. Moreover, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +continues to extend its support for Docker V1 pulls, catering to users who might still rely on this earlier version of Docker images. + +Support for OCI artifacts is enabled by default. The following examples show you how to use some media types, which can be used as examples for using other OCI media types. \ No newline at end of file diff --git a/modules/oci-referrers-oauth-access-token.adoc b/modules/oci-referrers-oauth-access-token.adoc new file mode 100644 index 000000000..6cc66d76e --- /dev/null +++ b/modules/oci-referrers-oauth-access-token.adoc @@ -0,0 +1,14 @@ +:_content-type: REFERENCE +[id="oci-referrers-oauth-access-token"] += OCI referrers OAuth access token + +In some cases, depending on the features that your {productname} deployment is configured to use, you might need to leverage an _OCI referrers OAuth access token_. OCI referrers OAuth access tokens are used to list OCI referrers of a manifest under a repository, and uses a `curl` command to make a `GET` request to the {productname} `v2/auth` endpoint. + +These tokens are obtained via basic HTTP authentication, wherein the user provides a username and password encoded in Base64 to authenticate directly with the `v2/auth` API endpoint. 
As such, they are based directly on the user's credentials and do not follow the same detailed authorization flow as OAuth 2, but still allow a user to authorize API requests. + +_OCI referrers OAuth access tokens_ do not offer scope-based permissions and do not expire. They are solely used to list OCI referrers of a manifest under a repository. + +[discrete] +== Additional resource + +* link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#attaching-referrers-image-tag[Attaching referrers to an image tag] \ No newline at end of file diff --git a/modules/oidc-config-fields.adoc b/modules/oidc-config-fields.adoc new file mode 100644 index 000000000..30e1752f7 --- /dev/null +++ b/modules/oidc-config-fields.adoc @@ -0,0 +1,65 @@ +[id="oidc-config-fields"] += OIDC configuration fields + +.OIDC fields +|=== +| Field | Type | Description +| **_LOGIN_CONFIG** + +(Required) | String | The parent key that holds the OIDC configuration settings. Typically the name of the OIDC provider, for example, `AZURE_LOGIN_CONFIG`, however any arbitrary string is accepted. +| **.CLIENT_ID** + +(Required) | String | The registered client ID for this {productname} instance. + + + +**Example:** `0e8dbe15c4c7630b6780` +| **.CLIENT_SECRET** + +(Required) | String | The registered client secret for this {productname} instance. + + + +**Example:** `e4a58ddd3d7408b7aec109e85564a0d153d3e846` +| **.DEBUGLOG** |Boolean | Whether to enable debugging. +| **.LOGIN_BINDING_FIELD** |String | Used when the internal authorization is set to LDAP. {productname} reads this parameter and tries to search through the LDAP tree for the user with this username. If it exists, it automatically creates a link to that LDAP account. +| **.LOGIN_SCOPES** | Object | Adds additional scopes that {productname} uses to communicate with the OIDC provider. +| **.OIDC_ENDPOINT_CUSTOM_PARAMS** | String | Support for custom query parameters on OIDC endpoints. 
The following endpoints are supported: +`authorization_endpoint`, `token_endpoint`, and `user_endpoint`. +| **.OIDC_ISSUER** | String | Allows the user to define the issuer to verify. For example, JWT tokens contain a parameter known as `iss` which defines who issued the token. By default, this is read from the `.well-known/openid-configuration` endpoint, which is exposed by every OIDC provider. If this verification fails, there is no login. +| **.OIDC_SERVER** + +(Required) | String | The address of the OIDC server that is being used for authentication. + + + +**Example:** `\https://sts.windows.net/6c878.../` +| **.PREFERRED_USERNAME_CLAIM_NAME** |String |Sets the preferred username to a parameter from the token. +| **.SERVICE_ICON** | String | Changes the icon on the login screen. + +| **.SERVICE_NAME** + +(Required) | String | The name of the service that is being authenticated. + + + +**Example:** `Microsoft Entra ID` +| **.VERIFIED_EMAIL_CLAIM_NAME** | String | The name of the claim that is used to verify the email address of the user. + +| **.PREFERRED_GROUP_CLAIM_NAME** | String | The key name within the OIDC token payload that holds information about the user's group memberships. + +| **.OIDC_DISABLE_USER_ENDPOINT** | Boolean | Whether to allow or disable the `/userinfo` endpoint. If using Azure Entra ID, this field must be set to `true` because Azure obtains the user's information from the token instead of calling the `/userinfo` endpoint. + + + + **Default:** `false` +|=== + +[id="oidc-config"] +== OIDC configuration + +The following example shows a sample OIDC configuration. + +.Example OIDC configuration +[source,yaml] +---- +AUTHENTICATION_TYPE: OIDC +# ... +AZURE_LOGIN_CONFIG: + CLIENT_ID: + CLIENT_SECRET: + OIDC_SERVER: + DEBUGGING: true + SERVICE_NAME: Microsoft Entra ID + VERIFIED_EMAIL_CLAIM_NAME: + OIDC_DISABLE_USER_ENDPOINT: true + OIDC_ENDPOINT_CUSTOM_PARAMS: + "authorization_endpoint": + "some": "param" +# ... 
+---- \ No newline at end of file diff --git a/modules/openshift-routes-limitations.adoc b/modules/openshift-routes-limitations.adoc new file mode 100644 index 000000000..caeb35d6b --- /dev/null +++ b/modules/openshift-routes-limitations.adoc @@ -0,0 +1,39 @@ +:_content-type: PROCEDURE +[id="openshift-routes-limitations"] += {productname-ocp} _builds_ limitations with self-managed _routes_ + +The following limitations apply when you are using the {productname} Operator on {ocp} with a managed `route` component: + +* Currently, {ocp} _routes_ are only able to serve traffic to a single port. Additional steps are required to set up {productname} Builds. + +* Ensure that your `kubectl` or `oc` CLI tool is configured to work with the cluster where the {productname} Operator is installed and that your `QuayRegistry` exists; the `QuayRegistry` does not have to be on the same bare metal cluster where _builders_ run. + +* Ensure that HTTP/2 ingress is enabled on the OpenShift cluster by following link:https://docs.openshift.com/container-platform/{ocp-y}/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress[these steps]. + +* The {productname} Operator creates a `Route` resource that directs gRPC traffic to the Build manager server running inside of the existing `Quay` pod, or pods. If you want to use a custom hostname, or a subdomain like ``, ensure that you create a CNAME record with your DNS provider that points to the `status.ingress[0].host` of the create `Route` resource. For example: ++ +---- +$ kubectl get -n route -quay-builder -o jsonpath={.status.ingress[0].host} +---- + +* Using the {ocp} UI or CLI, update the `Secret` referenced by `spec.configBundleSecret` of the `QuayRegistry` with the _build_ cluster CA certificate. Name the key `extra_ca_cert_build_cluster.cert`. 
Update the `config.yaml` file entry with the correct values referenced in the _build_ configuration that you created when you configured {productname} _builds_, and add the `BUILDMAN_HOSTNAME` configuration field: ++ +[source,yaml] +---- +BUILDMAN_HOSTNAME: <1> +BUILD_MANAGER: +- ephemeral +- ALLOWED_WORKER_COUNT: 1 + ORCHESTRATOR_PREFIX: buildman/production/ + JOB_REGISTRATION_TIMEOUT: 600 + ORCHESTRATOR: + REDIS_HOST: + REDIS_SSL: true + REDIS_SKIP_KEYSPACE_EVENT_SETUP: false + EXECUTORS: + - EXECUTOR: kubernetes + BUILDER_NAMESPACE: builder + ... +---- +<1> The externally accessible server hostname which the _build jobs_ use to communicate back to the _build manager_. Default is the same as `SERVER_HOSTNAME`. For an OpenShift `route` resource, it is either `status.ingress[0].host` or the CNAME entry if using a custom hostname. `BUILDMAN_HOSTNAME` must include the port number, for example, `somehost:443` for an {ocp} `route` resource, as the gRPC client used to communicate with the _build manager_ does not infer any port if omitted. diff --git a/modules/operator-cloudfront.adoc b/modules/operator-cloudfront.adoc index 30473fba5..e6fe35ef4 100644 --- a/modules/operator-cloudfront.adoc +++ b/modules/operator-cloudfront.adoc @@ -1,7 +1,19 @@ -[[operator-cloudfront]] +:_content-type: PROCEDURE +[id="operator-cloudfront"] = AWS S3 CloudFront -If you use AWS S3 CloudFront for backend registry storage, specify the private key as shown in the following example: -.... +[NOTE] +==== +Currently, using AWS S3 CloudFront is not supported on IBM Power and IBM Z. +==== + +Use the following procedure if you are using AWS S3 CloudFront for your backend registry storage. + +.Procedure + +. Enter the following command to specify the registry key: ++ +[source,terminal] +---- $ oc create secret generic --from-file config.yaml=./config_awss3cloudfront.yaml --from-file default-cloudfront-signing-key.pem=./default-cloudfront-signing-key.pem test-config-bundle -.... 
\ No newline at end of file +---- \ No newline at end of file diff --git a/modules/operator-components-intro.adoc b/modules/operator-components-intro.adoc index 0c7482cee..c82b70d17 100644 --- a/modules/operator-components-intro.adoc +++ b/modules/operator-components-intro.adoc @@ -1,15 +1,24 @@ +:_content-type: CONCEPT +[id="operator-components-intro"] += {productname} Operator components -[[operator-components-intro]] -= Quay Operator components +{productname} has many dependencies. These dependencies include a database, object storage, Redis, and others. The {productname} Operator manages an opinionated deployment of {productname} and its dependencies on Kubernetes. These dependencies are treated as _components_ and are configured through the `QuayRegistry` API. -Quay is a powerful container registry platform and as a result, has a significant number of dependencies. These include a database, object storage, Redis, and others. The Quay Operator manages an opinionated deployment of Quay and its dependencies on Kubernetes. These dependencies are treated as _components_ and are configured through the `QuayRegistry` API. +In the `QuayRegistry` custom resource, the `spec.components` field configures components. Each component contains two fields: `kind` (the name of the component), and `managed` (a boolean that addresses whether the component lifecycle is handled by the {productname} Operator). -In the `QuayRegistry` custom resource, the `spec.components` field configures components. Each component contains two fields: `kind` - the name of the component, and `managed` - boolean whether the component lifecycle is handled by the Operator. 
By default (omitting this field), all components are managed and will be autofilled upon reconciliation for visibility: +By default, all components are managed and auto-filled upon reconciliation for visibility: +.Example `QuayRegistry` resource [source,yaml] ---- -spec: - components: +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise + spec: + configBundleSecret: config-bundle-secret + components: - kind: quay managed: true - kind: postgres diff --git a/modules/operator-components-managed.adoc b/modules/operator-components-managed.adoc index 3005827f6..fa9bc4ac0 100644 --- a/modules/operator-components-managed.adoc +++ b/modules/operator-components-managed.adoc @@ -1,29 +1,39 @@ -[[operator-components-managed]] +:_content-type: REFERENCE +[id="operator-components-managed"] = Using managed components +Unless your `QuayRegistry` custom resource specifies otherwise, the {productname} Operator uses defaults for the following managed components: -Unless your `QuayRegistry` custom resource specifies otherwise, the Operator will use defaults for the following managed components: +* **quay:** Holds overrides for deployment of {productname-ocp}, for example, environment variables and number of replicas. This component is new as of {productname} 3.7 and cannot be set to unmanaged. -* **quay:** Holds overrides for the Quay deployment, for example, environment variables and number of replicas. This component is new in {productname} 3.7 and cannot be set to unmanaged. * **postgres:** For storing the registry metadata, ifeval::["{productname}" == "Red Hat Quay"] -uses a version of Postgres 10 from the link:https://www.softwarecollections.org/en/[Software Collections] +As of {productname} 3.9, uses a version of PostgreSQL 13 from link:https://www.softwarecollections.org/en/[Software Collections]. 
++ +[NOTE] +==== +When upgrading from {productname} 3.8 -> 3.9, the Operator automatically handles upgrading PostgreSQL 10 to PostgreSQL 13. This upgrade is required. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. +==== endif::[] ifeval::["{productname}" == "Project Quay"] -uses an upstream (CentOS) version of Postgres 10 +As of {productname} 3.9, uses an upstream (CentOS) version of PostgreSQL 13. endif::[] -* **clair:** Provides image vulnerability scanning -* **redis:** Handles Quay builder coordination and some internal logging -* **horizontalpodautoscaler:** Adjusts the number of Quay pods depending on memory/cpu consumption -* **objectstorage:** For storing image layer blobs, utilizes the `ObjectBucketClaim` Kubernetes API which is provided by Noobaa/RHOCS -* **route:** Provides an external entrypoint to the Quay registry from outside OpenShift -* **mirror:** Configures repository mirror workers (to support optional repository mirroring) -* **monitoring:** Features include a Grafana dashboard, access to individual metrics, and alerting to notify for frequently restarting Quay pods -* **tls:** Configures whether {productname} or OpenShift handles TLS -* **clairpostgres:** Configures a managed Clair database +* **clair:** Provides image vulnerability scanning. +* **redis:** Stores live builder logs and the {productname} tutorial. Also includes the locking mechanism that is required for garbage collection. -The Operator will handle any required configuration and installation work needed for {productname} to use the managed components. If the opinionated deployment performed by the Quay Operator is unsuitable for your environment, you can provide the Operator with `unmanaged` resources (overrides) as described in the following sections. +* **horizontalpodautoscaler:** Adjusts the number of `Quay` pods depending on memory/cpu consumption. 
+* **objectstorage:** For storing image layer blobs, utilizes the `ObjectBucketClaim` Kubernetes API which is provided by Noobaa or {odf}. +* **route:** Provides an external entrypoint to the {productname} registry from outside of {ocp}. +* **mirror:** Configures repository mirror workers to support optional repository mirroring. + +* **monitoring:** Features include a Grafana dashboard, access to individual metrics, and notifications for frequently restarting `Quay` pods. + +* **tls:** Configures whether {productname} or {ocp} handles SSL/TLS. + +* **clairpostgres:** Configures a managed Clair database. This is a separate database than the PostgreSQL database used to deploy {productname}. + +The {productname} Operator handles any required configuration and installation work needed for {productname} to use the managed components. If the opinionated deployment performed by the {productname} Operator is unsuitable for your environment, you can provide the {productname} Operator with `unmanaged` resources, or overrides, as described in the following sections. \ No newline at end of file diff --git a/modules/operator-components-unmanaged-other.adoc b/modules/operator-components-unmanaged-other.adoc index 2ff86b41d..7c9d46834 100644 --- a/modules/operator-components-unmanaged-other.adoc +++ b/modules/operator-components-unmanaged-other.adoc @@ -1,6 +1,5 @@ -[[operator-components-unmanaged-other]] -= Configuring other components - - -// TODO 36 Clair unmanaged +:_content-type: CONCEPT +[id="operator-components-unmanaged-other"] += Configuring external Redis +Use the content in this section to set up an external Redis deployment. 
\ No newline at end of file diff --git a/modules/operator-components-unmanaged.adoc b/modules/operator-components-unmanaged.adoc index 942222039..45570f8b6 100644 --- a/modules/operator-components-unmanaged.adoc +++ b/modules/operator-components-unmanaged.adoc @@ -1,13 +1,26 @@ -[[operator-components-unmanaged]] +:_content-type: REFERENCE +[id="operator-components-unmanaged"] = Using unmanaged components for dependencies -If you have existing components such as Postgres, Redis or object storage that you would like to use with Quay, you first configure them within the Quay configuration bundle (`config.yaml`) and then reference the bundle in your `QuayRegistry` (as a Kubernetes `Secret`) while indicating which components are unmanaged. +If you have existing components such as PostgreSQL, Redis, or object storage that you want to use with {productname}, you first configure them within the {productname} configuration bundle, or the `config.yaml` file. Then, they must be referenced in your `QuayRegistry` bundle as a Kubernetes `Secret` while indicating which components are unmanaged. + +//Might be used in a note, however I have removed due to the removal of the config editor on OCP deployments. + +//The {productname} config editor can also be used to create or modify an existing config bundle and simplifies the process of updating the Kubernetes `Secret`, especially for multiple changes. When {productname}'s configuration is changed by the config editor and sent to the {productname} Operator, the deployment is updated to reflect the new configuration. [NOTE] ==== -The Quay config editor can also be used to create or modify an existing config bundle and simplifies the process of updating the Kubernetes `Secret`, especially for multiple changes. When Quay's configuration is changed via the config editor and sent to the Operator, the Quay deployment will be updated to reflect the new configuration. 
+If you are using an unmanaged PostgreSQL database, and the version is PostgreSQL 10, it is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. ==== +See the following sections for configuring unmanaged components: - +* xref:operator-unmanaged-postgres[Using an existing PostgreSQL database] +* xref:operator-unmanaged-hpa[Using unmanaged Horizontal Pod Autoscalers] +* xref:operator-unmanaged-storage[Using unmanaged storage] +* xref:operator-unmanaged-storage-noobaa[Using an unmanaged NooBaa instance] +* xref:operator-unmanaged-redis[Using an unmanaged Redis database] +* xref:operator-unmanaged-route[Disabling the route component] +* xref:operator-unmanaged-monitoring[Disabling the monitoring component] +* xref:operator-unmanaged-mirroring[Disabling the mirroring component] \ No newline at end of file diff --git a/modules/operator-concepts.adoc b/modules/operator-concepts.adoc index 6e75d780f..05079f84b 100644 --- a/modules/operator-concepts.adoc +++ b/modules/operator-concepts.adoc @@ -1,16 +1,17 @@ -[[operator-concepts]] +:_content-type: CONCEPT +[id="operator-concepts"] = Introduction to the {productname} Operator -This document outlines the steps for configuring, deploying, managing and upgrading {productname} on OpenShift using the {productname} Operator. +Use the content in this chapter to execute the following: +* Install {productname-ocp} using the {productname} Operator -It shows you how to: +* Configure managed, or unmanaged, object storage -* Install the {productname} Operator -* Configure object storage, either managed or unmanaged -* Configure other unmanaged components, if required, including database, Redis, routes, TLS, etc. 
-* Deploy the {productname} registry on OpenShift using the Operator -* Use advanced features supported by the Operator -* Upgrade the registry by upgrading the Operator +* Configure unmanaged components, such as the database, Redis, routes, TLS, and so on +* Deploy the {productname} registry on {ocp} using the {productname} Operator +* Use advanced features supported by {productname} + +* Upgrade the {productname} registry by using the {productname} Operator \ No newline at end of file diff --git a/modules/operator-config-bundle-secret.adoc b/modules/operator-config-bundle-secret.adoc index 6018e3cbc..40f624a09 100644 --- a/modules/operator-config-bundle-secret.adoc +++ b/modules/operator-config-bundle-secret.adoc @@ -1,4 +1,7 @@ -[[operator-config-bundle-secret]] +:_content-type: REFERENCE +[id="operator-config-bundle-secret"] = Config bundle secret -The `spec.configBundleSecret` field is a reference to the `metadata.name` of a `Secret` in the same namespace as the `QuayRegistry`. This `Secret` must contain a `config.yaml` key/value pair. This `config.yaml` file is a Quay config YAML file. This field is optional, and will be auto-filled by the Operator if not provided. If provided, it serves as the base set of config fields which are later merged with other fields from any managed components to form a final output `Secret`, which is then mounted into the Quay application pods. \ No newline at end of file +The `spec.configBundleSecret` field is a reference to the `metadata.name` of a `Secret` in the same namespace as the `QuayRegistry` resource. This `Secret` must contain a `config.yaml` key/value pair. + +The `config.yaml` file is a {productname} `config.yaml` file. This field is optional, and is auto-filled by the {productname} Operator if not provided. 
If provided, it serves as the base set of config fields which are later merged with other fields from any managed components to form a final output `Secret`, which is then mounted into the {productname} application pods. \ No newline at end of file diff --git a/modules/operator-config-cli-access.adoc b/modules/operator-config-cli-access.adoc index 11b77be7a..ed06521cb 100644 --- a/modules/operator-config-cli-access.adoc +++ b/modules/operator-config-cli-access.adoc @@ -1,12 +1,22 @@ -[[operator-config-cli-access]] +:_content-type: PROCEDURE +[id="operator-config-cli-access"] = Determining QuayRegistry endpoints and secrets -You can examine the QuayRegistry resource, using `oc describe quayregistry` or `oc get quayregistry -o yaml`, to determine the current endpoints and secrets: +Use the following procedure to find `QuayRegistry` endpoints and secrets. -[source,yaml] +.Procedure + +. You can examine the `QuayRegistry` resource, using `oc describe quayregistry` or `oc get quayregistry -o yaml`, to find the current endpoints and secrets by entering the following command: ++ +[source,terminal] ---- $ oc get quayregistry example-registry -n quay-enterprise -o yaml - +---- ++ +.Example output ++ +[source,yaml] +---- apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: @@ -21,49 +31,66 @@ spec: ... 
- kind: clairpostgres managed: true - configBundleSecret: init-config-bundle-secret + configBundleSecret: init-config-bundle-secret <1> status: - configEditorCredentialsSecret: example-registry-quay-config-editor-credentials-fg2gdgtm24 - configEditorEndpoint: https://example-registry-quay-config-editor-quay-enterprise.apps.docs.gcp.quaydev.org currentVersion: 3.7.0 lastUpdated: 2022-05-11 13:28:38.199476938 +0000 UTC - registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org - + registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org <2> ---- +<1> The config bundle secret, containing the `config.yaml` file and any SSL/TLS certificates. +<2> The URL for your registry, for browser access to the registry UI, and for the registry API endpoint. -The relevant fields are: +//// +[id="determining-username-password-config-editor-tool"] +== Locating the username and password for the config editor tool -* `registryEndpoint`: The URL for your registry, for browser access to the registry UI, and for the registry API endpoint -* `configBundleSecret`: The config bundle secret, containing the `config.yaml` file and any SSL certs -* `configEditorEndpoint`: The URL for the config editor tool, for browser access to the config tool, and for the configuration API -* `configEditorCredentialsSecret`: The secret containing the username (typically `quayconfig`) and the password for the config editor tool +Use the following procedure to locate the username and password for the config editor tool. +.Procedure -To determine the username and password for the config editor tool: - -. Retrieve the secret: +. 
Enter the following command to retrieve the secret: + -[source,yaml] +[source,terminal] ---- $ oc get secret -n quay-enterprise example-registry-quay-config-editor-credentials-fg2gdgtm24 -o yaml - +---- ++ +.Example output ++ +[source,yaml] +---- apiVersion: v1 data: password: SkZwQkVKTUN0a1BUZmp4dA== username: cXVheWNvbmZpZw== kind: Secret ---- -. Decode the username: + +. Decode the username by entering the following command: + +[source,terminal] ---- $ echo 'cXVheWNvbmZpZw==' | base64 --decode - +---- ++ +.Example output ++ +[source,terminal] +---- quayconfig ---- -. Decode the password: + +. Decode the password by entering the following command: + +[source,terminal] ---- $ echo 'SkZwQkVKTUN0a1BUZmp4dA==' | base64 --decode - +---- ++ +.Example output ++ +[source,terminal] +---- JFpBEJMCtkPTfjxt ----- \ No newline at end of file +---- +//// \ No newline at end of file diff --git a/modules/operator-config-cli-download.adoc b/modules/operator-config-cli-download.adoc index 8f89608ac..32267741f 100644 --- a/modules/operator-config-cli-download.adoc +++ b/modules/operator-config-cli-download.adoc @@ -1,51 +1,49 @@ -[[operator-config-cli-download]] +[id="operator-config-cli-download"] = Downloading the existing configuration -There are a number of methods for accessing the current configuration: +The following procedure shows you how to download the existing configuration by locating the `Config Bundle Secret`. -. Using the config editor endpoint, specifying the username and password for the config editor: +.Procedure + +. Describe the `QuayRegistry` resource by entering the following command: + -[source,bash] +[source,terminal] ---- -$ curl -k -u quayconfig:JFpBEJMCtkPTfjxt https://example-registry-quay-config-editor-quay-enterprise.apps.docs.quayteam.org/api/v1/config +$ oc describe quayregistry -n ---- + -[source,yaml] +[source,terminal] ---- -{ - "config.yaml": { - "ALLOW_PULLS_WITHOUT_STRICT_LOGGING": false, - "AUTHENTICATION_TYPE": "Database", - ... 
- "USER_RECOVERY_TOKEN_LIFETIME": "30m" - }, - "certs": { - "extra_ca_certs/service-ca.crt": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURVVENDQWptZ0F3SUJBZ0lJRE9kWFhuUXFjMUF3RFFZSktvWklodmNOQVFFTEJRQXdOakUwTURJR0ExVUUKQXd3cmIzQmxibk5vYVdaMExYTmxjblpwWTJVdGMyVnlkbWx1WnkxemFXZHVaWEpBTVRZek1UYzNPREV3TXpBZQpGdzB5TVRBNU1UWXdOelF4TkRKYUZ..." - } -} +# ... + Config Bundle Secret: example-registry-config-bundle-v123x +# ... ---- -. Using the config bundle secret -.. Get the secret data: + +. Obtain the secret data by entering the following command: + -[source,bash] +[source,terminal] ---- -$ oc get secret -n quay-enterprise init-config-bundle-secret -o jsonpath='{.data}' +$ oc get secret -n -o jsonpath='{.data}' ---- + -.Sample output +.Example output ++ [source,yaml] ---- { "config.yaml": "RkVBVFVSRV9VU0 ... MDAwMAo=" } ---- -.. Decode the data: + +. Decode the data by entering the following command: + -[source,bash] +[source,terminal] ---- $ echo 'RkVBVFVSRV9VU0 ... MDAwMAo=' | base64 --decode ---- + +.Example output ++ [source,yaml] ---- FEATURE_USER_INITIALIZE: true @@ -59,3 +57,9 @@ FEATURE_BUILD_SUPPORT: true DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 102400000 ---- +. Optional. You can export the data into a YAML file into the current directory by passing in the `>> config.yaml` flag. For example: ++ +[source,terminal] +---- +$ echo 'RkVBVFVSRV9VU0 ... 
MDAwMAo=' | base64 --decode >> config.yaml +---- \ No newline at end of file diff --git a/modules/operator-config-cli.adoc b/modules/operator-config-cli.adoc index 14a5c43c9..dac3a809c 100644 --- a/modules/operator-config-cli.adoc +++ b/modules/operator-config-cli.adoc @@ -1,23 +1,26 @@ -[[operator-config-cli]] -= Configuring Quay on OpenShift +:_content-type: PROCEDURE +[id="operator-config-cli"] += Customizing {productname} on {ocp} -Once deployed, you can configure the Quay application by editing the Quay configuration bundle secret `spec.configBundleSecret` and you can also change the managed status of components in the `spec.components` object of the QuayRegistry resource +After deployment, you can customize the {productname} application by editing the {productname} configuration bundle secret `spec.configBundleSecret`. You can also change the managed status of components and configure resource requests for some components in the `spec.components` object of the `QuayRegistry` resource. -Alternatively, you can use the config editor UI to configure the Quay application, as described in the section xref:operator-config-ui[]. +[id="editing-config-bundle-secret-in-ocp-console"] +== Editing the config bundle secret in the {ocp} console -== Editing the config bundle secret in the OpenShift console +Use the following procedure to edit the config bundle secret in the {ocp} console. .Procedure -. On the Quay Registry overview screen, click the link for the Config Bundle Secret: + +. On the {productname} Registry overview screen, click the link for the *Config Bundle Secret*. + -image:operator-quay-registry-overview.png[Quay Registry overview] +image:operator-quay-registry-overview.png[{productname} Registry overview] -. To edit the secret, click **Actions** -> **Edit Secret** +. To edit the secret, click **Actions** -> **Edit Secret**. + image:operator-config-bundle-edit-secret.png[Edit secret] -. Modify the configuration and save the changes +. 
Modify the configuration and save the changes. + image:operator-save-config-changes.png[Save changes] -. Monitor the deployment to ensure successful completion and that the configuration changes have taken effect +. Monitor the deployment to ensure successful completion and that the configuration changes have taken effect. diff --git a/modules/operator-config-ui-access.adoc b/modules/operator-config-ui-access.adoc index c1e63b705..62cc22530 100644 --- a/modules/operator-config-ui-access.adoc +++ b/modules/operator-config-ui-access.adoc @@ -1,28 +1,40 @@ -[[operator-config-ui-access]] +:_content-type: PROCEDURE +[id="operator-config-ui-access"] = Accessing the config editor -In the Details section of the QuayRegistry screen, the endpoint for the config editor is available, along with a link to the secret containing the credentials for logging into the config editor: +[NOTE] +==== +The Config Editor UI has been removed and is not supported on IBM Power and IBM Z. +==== + +In the *Details* section of the `QuayRegistry` object, the endpoint for the config editor is available, along with a link to the `Secret` object that contains the credentials for logging into the config editor. For example: image:config-editor-details-openshift.png[Config editor details] +[id="retrieving-the-config-editor-credentials"] +== Retrieving the config editor credentials +Use the following procedure to retrieve the config editor credentials. -== Retrieving the config editor credentials +.Procedure . Click on the link for the config editor secret: + image:config-editor-secret.png[Config editor secret] - -. In the Data section of the Secret details screen, click `Reveal values` to see the credentials for logging in to the config editor: +. In the *Data* section of the *Secret* details page, click *Reveal values* to see the credentials for logging into the config editor. 
For example: + image:config-editor-secret-reveal.png[Config editor secret reveal] +[id="logging-into-config-editor"] +== Logging into the config editor -== Logging in to the config editor +Use the following procedure to log into the config editor. -Browse to the config editor endpoint and then enter the username, typically `quayconfig`, and the corresponding password to access the config tool: +.Procedure +* Navigate the config editor endpoint. When prompted, enter the username, for example, `quayconfig`, and the password. For example: ++ image:config-editor-ui.png[Config editor user interface] diff --git a/modules/operator-config-ui-change.adoc b/modules/operator-config-ui-change.adoc index 37b594528..6d3c3601d 100644 --- a/modules/operator-config-ui-change.adoc +++ b/modules/operator-config-ui-change.adoc @@ -1,24 +1,31 @@ -[[operator-config-ui-change]] +:_content-type: PROCEDURE +[id="operator-config-ui-change"] == Changing configuration -In this example of updating the configuration, a superuser is added via the config editor tool: +In the following example, you will update your configuration file by changing the default expiration period of deleted tags. -. Add an expiration period, for example `4w`, for the time machine functionality: +.Procedure + +. On the config editor, locate the *Time Machine* section. + +. Add an expiration period to the *Allowed expiration periods* box, for example, `4w`: + image:ui-time-machine-add.png[Add expiration period] -. Select `Validate Configuration Changes` to ensure that the changes are valid -. Apply the changes by pressing the `Reconfigure Quay` button: + +. Select *Validate Configuration Changes* to ensure that the changes are valid. + +. Apply the changes by pressing *Reconfigure Quay*: + image:config-editor-reconfigure.png[Reconfigure] -. 
The config tool notifies you that the change has been submitted to Quay: -+ +After applying the changes, the config tool notifies you that the changes made have been submitted to your {productname} deployment: + image:config-editor-reconfigured.png[Reconfigured] [NOTE] ==== -Reconfiguring {productname} using the config tool UI can lead to the registry being unavailable for a short time, while the updated configuration is applied. +Reconfiguring {productname} using the config tool UI can lead to the registry being unavailable for a short time while the updated configuration is applied. ==== diff --git a/modules/operator-config-ui-monitoring.adoc b/modules/operator-config-ui-monitoring.adoc index b4067eabb..1c64b976b 100644 --- a/modules/operator-config-ui-monitoring.adoc +++ b/modules/operator-config-ui-monitoring.adoc @@ -1,24 +1,27 @@ -[[operator-config-ui-monitoring]] -= Monitoring reconfiguration in the UI +:_content-type: CONCEPT +[id="operator-config-ui-monitoring"] += Monitoring reconfiguration in the {productname} UI +You can monitor the reconfiguration of {productname} in real-time. +[id="reconfiguring-quayregistry-resource"] == QuayRegistry resource -After reconfiguring the Operator, you can track the progress of the redeployment in the YAML tab for the specific instance of QuayRegistry, in this case, `example-registry`: +After reconfiguring the {productname} Operator, you can track the progress of the redeployment in the *YAML* tab for the specific instance of `QuayRegistry`, in this case, `example-registry`: image:ui-monitor-deploy-update.png[] -Each time the status changes, you will be prompted to reload the data to see the updated version. Eventually, the Operator will reconcile the changes, and there will be no unhealthy components reported. +Each time the status changes, you will be prompted to reload the data to see the updated version. Eventually, the {productname} Operator reconciles the changes, and there are no unhealthy components reported. 
image:ui-monitor-deploy-done.png[] - +[id="reconfiguring-events-tab"] == Events -The Events tab for the QuayRegistry shows some events related to the redeployment: +The *Events* tab for the `QuayRegistry` shows some events related to the redeployment. For example: image:ui-monitor-deploy-streaming-events.png[] -Streaming events, for all resources in the namespace that are affected by the reconfiguration, are available in the OpenShift console under Home -> Events: +Streaming events, for all resources in the namespace that are affected by the reconfiguration, are available in the {ocp} console under *Home* -> *Events*. For example: image:ui-monitor-deploy-streaming-events.png[] \ No newline at end of file diff --git a/modules/operator-config-ui-updated.adoc b/modules/operator-config-ui-updated.adoc index 5d262e7cf..fabe731db 100644 --- a/modules/operator-config-ui-updated.adoc +++ b/modules/operator-config-ui-updated.adoc @@ -1,23 +1,20 @@ -[[operator-config-ui-updated]] +:_content-type: PROCEDURE +[id="operator-config-ui-updated"] = Accessing updated information after reconfiguration -== Accessing the updated config tool credentials in the UI +Use the following procedure to access the updated `config.yaml` file using the {productname} UI and the config bundle. -With {productname} 3.7, reconfiguring Quay through the UI no longer generates a new login password. The password now generates only once, and remains the same after reconciling `QuayRegistry` objects. +.Procedure -== Accessing the updated config.yaml in the UI +. On the `QuayRegistry` *Details* screen, click on the *Config Bundle Secret*. -Use the config bundle to access the updated `config.yaml` file. +. In the *Data* section of the `Secret` details screen, click *Reveal values* to see the `config.yaml` file. -. On the QuayRegistry details screen, click on the Config Bundle Secret - -. In the Data section of the Secret details screen, click Reveal values to see the `config.yaml` file - -. 
Check that the change has been applied. In this case, `4w` should be in the list of `TAG_EXPIRATION_OPTIONS`: +. Check that the change has been applied. In this case, `4w` should be in the list of `TAG_EXPIRATION_OPTIONS`. For example: + [source,yaml] ---- -... +--- SERVER_HOSTNAME: example-quay-openshift-operators.apps.docs.quayteam.org SETUP_COMPLETE: true SUPER_USERS: @@ -25,5 +22,6 @@ SUPER_USERS: TAG_EXPIRATION_OPTIONS: - 2w - 4w -... +- 3y +--- ---- diff --git a/modules/operator-config-ui.adoc b/modules/operator-config-ui.adoc index 508edff2f..3c197ef34 100644 --- a/modules/operator-config-ui.adoc +++ b/modules/operator-config-ui.adoc @@ -1,3 +1,9 @@ -[[operator-config-ui]] -= Using the config tool to reconfigure Quay on OpenShift +:_content-type: CONCEPT +[id="operator-config-ui"] += Using the config tool to reconfigure {productname} on {ocp} +As of {productname} 3.10, the configuration tool has been removed on {ocp} deployments, meaning that users cannot configure, or reconfigure, directly from the {ocp} console. Additionally, the `quay-config-editor` pod no longer deploys, users cannot check the status of the config editor route, and the Config Editor Endpoint no longer generates on the {productname} Operator *Details* page + +As a workaround, you can deploy the configuration tool locally and create your own configuration bundle. This includes entering the database and storage credentials used for your {productname} on {ocp} deployment, generating a `config.yaml` file, and using it to deploy {productname} on {ocp} via the command-line interface. + +To deploy the configuration tool locally, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.10/html-single/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/index#poc-getting-started[Getting started with {productname}] and follow the instructions up to "Configuring {productname}". 
Advanced configuration settings, such as using custom SSL certificates, can be found on the same page. \ No newline at end of file diff --git a/modules/operator-console-monitoring-alerting.adoc b/modules/operator-console-monitoring-alerting.adoc index 2c0401022..3b581b28d 100644 --- a/modules/operator-console-monitoring-alerting.adoc +++ b/modules/operator-console-monitoring-alerting.adoc @@ -1,32 +1,36 @@ -[[operator-console-monitoring-alerting]] +:_content-type: CONCEPT +[id="operator-console-monitoring-alerting"] = Console monitoring and alerting -{productname} provides support for monitoring Quay instances that were deployed using the Operator, from inside the OpenShift console. The new monitoring features include a Grafana dashboard, access to individual metrics, and alerting to notify for frequently restarting Quay pods. +{productname} provides support for monitoring instances that were deployed by using the {productname} Operator, from inside the {ocp} console. The new monitoring features include a Grafana dashboard, access to individual metrics, and alerting to notify for frequently restarting `Quay` pods. [NOTE] ==== -To enable the monitoring features, the Operator must be installed in "all namespaces" mode. +To enable the monitoring features, you must select *All namespaces on the cluster* as the installation mode when installing the {productname} Operator. 
==== +[id="operator-dashboard"] == Dashboard -In the OpenShift console, navigate to Monitoring -> Dashboards and search for the dashboard of your desired Quay registry instance: +On the {ocp} console, click *Monitoring* -> *Dashboards* and search for the dashboard of your desired {productname} registry instance: image:choose-dashboard.png[Choose Quay dashboard] -The dashboard shows various statistics including: +The dashboard shows various statistics including the following: -* The number of Organizations, Repositories, Users and Robot accounts -* CPU Usage and Max Memory Usage -* Rates of Image Pulls and Pushes, and Authentication requests +* The number of *Organizations*, *Repositories*, *Users*, and *Robot accounts* +* CPU Usage +* Max memory usage +* Rates of pulls and pushes, and authentication requests * API request rate * Latencies image:console-dashboard-1.png[Console dashboard] +[id="operator-metrics"] == Metrics -You can see the underlying metrics behind the Quay dashboard, by accessing Monitoring -> Metrics in the UI. In the Expression field, enter the text `quay_` to see the list of metrics available: +You can see the underlying metrics behind the {productname} dashboard by accessing *Monitoring* -> *Metrics* in the UI. In the *Expression* field, enter the text `quay_` to see the list of metrics available: image:quay-metrics.png[Quay metrics] @@ -34,14 +38,15 @@ Select a sample metric, for example, `quay_org_rows`: image:quay-metrics-org-rows.png[Number of Quay organizations] -This metric shows the number of organizations in the registry, and it is directly surfaced in the dashboard as well. +This metric shows the number of organizations in the registry. It is also directly surfaced in the dashboard. +[id="operator-alerting"] == Alerting -An alert is raised if the Quay pods restart too often. 
The alert can be configured by accessing the Alerting rules tab from Monitoring -> Alerting in the consol UI and searching for the Quay-specific alert: +An alert is raised if the `Quay` pods restart too often. The alert can be configured by accessing the *Alerting* rules tab from *Monitoring* -> *Alerting* in the console UI and searching for the Quay-specific alert: image:alerting-rules.png[Alerting rules] -Select the QuayPodFrequentlyRestarting rule detail to configure the alert: +Select the `QuayPodFrequentlyRestarting` rule detail to configure the alert: image:quay-pod-frequently-restarting.png[Alerting rule details] diff --git a/modules/operator-custom-ssl-certs-config-bundle.adoc b/modules/operator-custom-ssl-certs-config-bundle.adoc index 4675f7161..d21be4c40 100644 --- a/modules/operator-custom-ssl-certs-config-bundle.adoc +++ b/modules/operator-custom-ssl-certs-config-bundle.adoc @@ -1,95 +1,21 @@ -[[operator-custom-ssl-certs-config-bundle]] -= Using the config bundle to configure custom SSL certs +:_content-type: PROCEDURE +[id="operator-custom-ssl-certs-config-bundle"] += Configuring custom SSL/TLS certificates for {productname-ocp} -You can configure custom SSL certs either before initial deployment or after {productname} is deployed on OpenShift, by creating or updating the config bundle secret. If you are adding the cert(s) to an existing deployment, you must include the existing `config.yaml` in the new config bundle secret, even if you are not making any configuration changes. +When {productname} is deployed on {ocp}, the `tls` component of the `QuayRegistry` custom resource definition (CRD) is set to `managed` by default. As a result, {ocp}'s Certificate Authority is used to create HTTPS endpoints and to rotate SSL/TLS certificates. -== Set TLS to unmanaged +You can configure custom SSL/TLS certificates before or after the initial deployment of {productname-ocp}. 
This process involves creating or updating the `configBundleSecret` resource within the `QuayRegistry` YAML file to integrate your custom certificates and setting the `tls` component to `unmanaged`. -In your Quay Registry yaml, set `kind: tls` to `managed: false`: +[IMPORTANT] +==== +When configuring custom SSL/TLS certificates for {productname}, administrators are responsible for certificate rotation. +==== -[source,yaml] ----- - - kind: tls - managed: false ----- +The following procedures enable you to apply custom SSL/TLS certificates to ensure secure communication and meet specific security requirements for your {productname-ocp} deployment. These steps assume you have already created a Certificate Authority (CA) bundle or an `ssl.key`, and an `ssl.cert`. The procedure then shows you how to integrate those files into your {productname-ocp} deployment, which ensures that your registry operates with the specified security settings and conforms to your organization's SSL/TLS policies. -In the events, you should see that the change is blocked until you set up the appropriate config: - -[source,yaml] ----- - - lastTransitionTime: '2022-03-28T12:56:49Z' - lastUpdateTime: '2022-03-28T12:56:49Z' - message: >- - required component `tls` marked as unmanaged, but `configBundleSecret` - is missing necessary fields - reason: ConfigInvalid - status: 'True' - ----- - -== Add certs to config bundle - -.Procedure -. Create the secret using embedded data or using files: -.. 
Embed the configuration details directly in the Secret resource YAML file, for example: -+ -[source,yaml] -.custom-ssl-config-bundle.yaml ----- -apiVersion: v1 -kind: Secret -metadata: - name: custom-ssl-config-bundle-secret - namespace: quay-enterprise -data: - config.yaml: | - FEATURE_USER_INITIALIZE: true - BROWSER_API_CALLS_XHR_ONLY: false - SUPER_USERS: - - quayadmin - FEATURE_USER_CREATION: false - FEATURE_QUOTA_MANAGEMENT: true - FEATURE_PROXY_CACHE: true - FEATURE_BUILD_SUPPORT: true - DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 102400000 - extra_ca_cert_my-custom-ssl.crt: | - -----BEGIN CERTIFICATE----- - MIIDsDCCApigAwIBAgIUCqlzkHjF5i5TXLFy+sepFrZr/UswDQYJKoZIhvcNAQEL - BQAwbzELMAkGA1UEBhMCSUUxDzANBgNVBAgMBkdBTFdBWTEPMA0GA1UEBwwGR0FM - .... - -----END CERTIFICATE----- ----- -+ -Next, create the secret from the YAML file: -+ ----- -$ oc create -f custom-ssl-config-bundle.yaml ----- -.. Alternatively, you can create files containing the desired information, and then create the secret from those files: -+ ----- -$ oc create secret generic custom-ssl-config-bundle-secret \ - --from-file=config.yaml \ - --from-file=extra_ca_cert_my-custom-ssl.crt=my-custom-ssl.crt ----- - - -. Create or update the QuayRegistry YAML file `quayregistry.yaml`, referencing the created Secret, for example: -+ -.quayregistry.yaml -[source,yaml] ----- -apiVersion: quay.redhat.com/v1 -kind: QuayRegistry -metadata: - name: example-registry - namespace: quay-enterprise -spec: - configBundleSecret: custom-ssl-config-bundle-secret ----- -. Deploy or update the registry using the YAML file: -+ ----- -oc apply -f quayregistry.yaml ----- +[NOTE] +==== +* The following procedure is used for securing {productname} with an HTTPS certificate. Note that this differs from managing Certificate Authority Trust Bundles. 
CA Trust Bundles are used by system processes within the `Quay` container to verify certificates against trusted CAs, and ensure that services like LDAP, storage backend, and OIDC connections are trusted. +* If you are adding the certificates to an existing deployment, you must include the existing `config.yaml` file in the new config bundle secret, even if you are not making any configuration changes. +==== \ No newline at end of file diff --git a/modules/operator-customize-images.adoc b/modules/operator-customize-images.adoc index 10157b729..06f7699ac 100644 --- a/modules/operator-customize-images.adoc +++ b/modules/operator-customize-images.adoc @@ -1,15 +1,23 @@ -[[operator-customize-images]] +:_content-type: PROCEDURE +[id="operator-customize-images"] = Customizing Default Operator Images [NOTE] ==== -Using this mechanism is not supported for production Quay environments and is strongly encouraged only for development/testing purposes. There is no guarantee your deployment will work correctly when using non-default images with the Quay Operator. +Currently, customizing default Operator images is not supported on IBM Power and IBM Z. ==== -In certain circumstances, it may be useful to override the default images used by the Operator. This can be done by setting one or more environment variables in the Quay Operator `ClusterServiceVersion`. +In certain circumstances, it might be useful to override the default images used by the {productname} Operator. This can be done by setting one or more environment variables in the {productname} Operator `ClusterServiceVersion`. +[IMPORTANT] +==== +Using this mechanism is not supported for production {productname} environments and is strongly encouraged only for development or testing purposes. There is no guarantee your deployment will work correctly when using non-default images with the {productname} Operator. 
+==== + +[id="custom-environment-variables"] == Environment Variables -The following environment variables are used in the Operator to override component images: + +The following environment variables are used in the {productname} Operator to override component images: [cols=2*] |=== @@ -31,21 +39,29 @@ The following environment variables are used in the Operator to override compone [NOTE] ==== -Override images *must* be referenced by manifest (@sha256:), not by tag (:latest). +Overridden images *must* be referenced by manifest (@sha256:) and not by tag (:latest). ==== -== Applying Overrides to a Running Operator +[id="applying-overrides-to-running-operator"] +== Applying overrides to a running Operator -When the Quay Operator is installed in a cluster via the link:https://docs.openshift.com/container-platform/4.6/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)], the managed component container images can be easily overridden by modifying the `ClusterServiceVersion` object, which is OLM's representation of a running Operator in the cluster. Find the Quay Operator's `ClusterServiceVersion` either by using a Kubernetes UI or `kubectl`/`oc`: +When the {productname} Operator is installed in a cluster through the link:https://docs.openshift.com/container-platform/{ocp-y}/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)], the managed component container images can be easily overridden by modifying the `ClusterServiceVersion` object. -``` -$ oc get clusterserviceversions -n -``` +Use the following procedure to apply overrides to a running {productname} Operator. -Using the UI, `oc edit`, or any other method, modify the Quay `ClusterServiceVersion` to include the environment variables outlined above to point to the override images: +.Procedure -*JSONPath*: `spec.install.spec.deployments[0].spec.template.spec.containers[0].env` +. 
The `ClusterServiceVersion` object is Operator Lifecycle Manager's representation of a running Operator in the cluster. Find the {productname} Operator's `ClusterServiceVersion` by using a Kubernetes UI or the `kubectl`/`oc` CLI tool. For example: ++ +[source,terminal] +---- +$ oc get clusterserviceversions -n +---- +. Using the UI, `oc edit`, or another method, modify the {productname} `ClusterServiceVersion` to include the environment variables outlined above to point to the override images: ++ +*JSONPath*: `spec.install.spec.deployments[0].spec.template.spec.containers[0].env` ++ [source,yaml] ---- - name: RELATED_IMAGE_COMPONENT_QUAY @@ -58,7 +74,7 @@ Using the UI, `oc edit`, or any other method, modify the Quay `ClusterServiceVer value: centos/redis-32-centos7@sha256:06dbb609484330ec6be6090109f1fa16e936afcf975d1cbc5fff3e6c7cae7542 ---- -Note that this is done at the Operator level, so every QuayRegistry will be deployed using these same overrides. - - - +[NOTE] +==== +This is done at the Operator level, so every `QuayRegistry` will be deployed using these same overrides. +==== diff --git a/modules/operator-deploy-cli.adoc b/modules/operator-deploy-cli.adoc index 2d04edf6b..55618c9fc 100644 --- a/modules/operator-deploy-cli.adoc +++ b/modules/operator-deploy-cli.adoc @@ -1,10 +1,31 @@ -[[operator-deploy-cli]] +:_content-type: PROCEDURE +[id="operator-deploy-cli"] = Deploying {productname} from the command line +Use the following procedure to deploy {productname} by using the command-line interface (CLI). + +.Prerequisites + +* You have logged into {ocp} using the CLI. + +.Procedure + +. Create a namespace, for example, `quay-enterprise`, by entering the following command: ++ +[source,terminal] +---- +$ oc new-project quay-enterprise +---- + +. Optional. 
If you want to pre-configure any aspects of your {productname} deployment, create a `Secret` for the config bundle: ++ +[source,terminal] +---- +$ oc create secret generic quay-enterprise-config-bundle --from-file=config-bundle.tar.gz=/path/to/config-bundle.tar.gz +---- -. Create a namespace, for example, `quay-enterprise`. -. Create a secret for the config bundle, if you want to pre-configure any aspects of the deployment . Create a `QuayRegistry` custom resource in a file called `quayregistry.yaml` + .. For a minimal deployment, using all the defaults: + .quayregistry.yaml: @@ -16,9 +37,11 @@ metadata: name: example-registry namespace: quay-enterprise ---- -.. If you want to have some components unmanaged, add this information in the `spec` field. For example, a minimal deployment might look like: + +.. Optional. If you want to have some components unmanaged, add this information in the `spec` field. A minimal deployment might look like the following example: ++ +.Example quayregistry.yaml with unmanaged components + -.quayregistry.yaml: [source,yaml] ---- apiVersion: quay.redhat.com/v1 @@ -37,9 +60,11 @@ spec: - kind: monitoring managed: false ---- -.. If you have created a config bundle, for example, `init-config-bundle-secret`, reference it in the `quayregistry.yaml` file: + +.. Optional. If you have created a config bundle, for example, `init-config-bundle-secret`, reference it in the `quayregistry.yaml` file: ++ +.Example quayregistry.yaml with a config bundle + -.quayregistry.yaml: [source,yaml] ---- apiVersion: quay.redhat.com/v1 @@ -51,9 +76,10 @@ spec: configBundleSecret: init-config-bundle-secret ---- -.. If you have a proxy configured, you can add the information using overrides for Quay, Clair, and mirroring: +.. Optional. 
If you have a proxy configured, you can add the information using overrides for {productname}, Clair, and mirroring: ++ +.Example quayregistry.yaml with proxy configured + -.quayregistry.yaml: [source,yaml] ---- kind: QuayRegistry @@ -104,14 +130,20 @@ spec: value: quayproxy.qe.devcluster.openshift.com:3128 ---- -. Create the `QuayRegistry` in specified namespace: +. Create the `QuayRegistry` in the specified namespace by entering the following command: + -```sh +[source,terminal] +---- $ oc create -n quay-enterprise -f quayregistry.yaml -``` -. See the section xref:operator-monitor-deploy-cli[Monitoring and debugging the deployment process] for information on how to track the progress of the deployment. -. Wait until the `status.registryEndpoint` is populated. +---- + +. Enter the following command to see when the `status.registryEndpoint` is populated: + -```sh +[source,terminal] +---- $ oc get quayregistry -n quay-enterprise example-registry -o jsonpath="{.status.registryEndpoint}" -w -``` +---- + +.Additional resources + +* For more information about how to track the progress of your {productname} deployment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-monitor-deploy-cli[Monitoring and debugging the deployment process]. 
\ No newline at end of file diff --git a/modules/operator-deploy-hpa.adoc b/modules/operator-deploy-hpa.adoc index 57a2cbb90..00e13f949 100644 --- a/modules/operator-deploy-hpa.adoc +++ b/modules/operator-deploy-hpa.adoc @@ -1,22 +1,25 @@ -[[operator-deploy-hpa]] -= Horizontal Pod Autoscaling (HPA) +:_content-type: REFERENCE +[id="operator-deploy-hpa"] += Horizontal Pod Autoscaling A default deployment shows the following running pods: -* Two pods for the Quay application itself (`example-registry-quay-app-*``) -* One Redis pod for Quay logging (`example-registry-quay-redis-*`) -* One database pod for PostgreSQL used by Quay for metadata storage (`example-registry-quay-database-*`) -* One pod for the Quay config editor (`example-registry-quay-config-editor-*`) -* Two Quay mirroring pods (`example-registry-quay-mirror-*`) +* Two pods for the {productname} application itself (`example-registry-quay-app-*`) +* One Redis pod for {productname} logging (`example-registry-quay-redis-*`) +* One database pod for PostgreSQL used by {productname} for metadata storage (`example-registry-quay-database-*`) +* Two `Quay` mirroring pods (`example-registry-quay-mirror-*`) * Two pods for the Clair application (`example-registry-clair-app-*`) * One PostgreSQL pod for Clair (`example-registry-clair-postgres-*`) - -As HPA is configured by default to be `managed`, the number of pods for Quay, Clair and repository mirroring is set to two. This facilitates the avoidance of downtime when updating / reconfiguring Quay via the Operator or during rescheduling events. +Horizontal Pod Autoscaling is configured by default to be `managed`, and the number of pods for Quay, Clair, and repository mirroring is set to two. This facilitates the avoidance of downtime when updating or reconfiguring {productname} through the {productname} Operator or during rescheduling events. 
You can enter the following command to view information about HPA objects: [source,terminal] ---- $ oc get hpa -n quay-enterprise +---- +.Example output +[source,terminal] +---- NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE example-registry-clair-app Deployment/example-registry-clair-app 16%/90%, 0%/90% 2 10 2 13d example-registry-quay-app Deployment/example-registry-quay-app 31%/90%, 1%/90% 2 20 2 13d diff --git a/modules/operator-deploy-infrastructure.adoc b/modules/operator-deploy-infrastructure.adoc index 263f120ef..0885f0a51 100644 --- a/modules/operator-deploy-infrastructure.adoc +++ b/modules/operator-deploy-infrastructure.adoc @@ -1,19 +1,26 @@ -[[operator-deploy-infrastructure]] -= Deploying Quay on infrastructure nodes +[id="operator-deploy-infrastructure"] += Deploying {productname} on infrastructure nodes -By default, Quay-related pods are placed on arbitrary worker nodes when using the Operator to deploy the registry. The OpenShift Container Platform documentation shows how to use machine sets to configure nodes to only host infrastructure components (see link:https://docs.openshift.com/container-platform/4.7/machine_management/creating-infrastructure-machinesets.html[]). +By default, `Quay` related pods are placed on arbitrary worker nodes when using the {productname} Operator to deploy the registry. For more information about how to use machine sets to configure nodes to only host infrastructure components, see link:https://docs.openshift.com/container-platform/{ocp-y}/machine_management/creating-infrastructure-machinesets.html[Creating infrastructure machine sets]. +If you are not using {ocp} machine set resources to deploy infra nodes, the section in this document shows you how to manually label and taint nodes for infrastructure purposes. After you have configured your infrastructure nodes either manually or by using machine sets, you can control the placement of `Quay` pods on these nodes using node selectors and tolerations. 
-If you are not using OCP MachineSet resources to deploy infra nodes, this section shows you how to manually label and taint nodes for infrastructure purposes. +[id="labeling-taint-nodes-for-infrastructure-use"] +== Labeling and tainting nodes for infrastructure use -Once you have configured your infrastructure nodes, either manually or using machine sets, you can then control the placement of Quay pods on these nodes using node selectors and tolerations. - -== Label and taint nodes for infrastructure use -In the cluster used in this example, there are three master nodes and six worker nodes: +Use the following procedure to label and taint nodes for infrastructure use. +. Enter the following command to reveal the master and worker nodes. In this example, there are three master nodes and six worker nodes. ++ +[source,terminal] ---- $ oc get nodes +---- ++ +.Example output ++ +[source,terminal] +---- NAME                                               STATUS   ROLES    AGE     VERSION user1-jcnp6-master-0.c.quay-devel.internal         Ready    master   3h30m   v1.20.0+ba45583 user1-jcnp6-master-1.c.quay-devel.internal         Ready    master   3h30m   v1.20.0+ba45583 @@ -26,18 +33,34 @@ user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal   Ready    worker   3h22m user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal   Ready    worker   3h21m   v1.20.0+ba45583 ---- -Label the final three worker nodes for infrastructure use: - 
Enter the following commands to label the three worker nodes for infrastructure use: ++ +[source,terminal] ---- $ oc label node --overwrite user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal node-role.kubernetes.io/infra= +---- ++ +[source,terminal] +---- $ oc label node --overwrite user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal node-role.kubernetes.io/infra= +---- ++ +[source,terminal] +---- $ oc label node --overwrite user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal node-role.kubernetes.io/infra= ---- -Now, when you list the nodes in the cluster, the last 3 worker nodes will have an added role of `infra`: - +. Now, when listing the nodes in the cluster, the last three worker nodes have the `infra` role. For example: ++ +[source,terminal] ---- $ oc get nodes +---- ++ +.Example ++ +[source,terminal] +---- NAME                                               STATUS   ROLES          AGE     VERSION user1-jcnp6-master-0.c.quay-devel.internal         Ready    master         4h14m   v1.20.0+ba45583 user1-jcnp6-master-1.c.quay-devel.internal         Ready    master         4h15m   v1.20.0+ba45583 @@ -47,68 +70,162 @@ user1-jcnp6-worker-b-jr7hc.c.quay-devel.internal   Ready    worker       user1-jcnp6-worker-c-jrq4v.c.quay-devel.internal   Ready    worker         4h5m    v1.20.0+ba45583 user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal   Ready    infra,worker   4h6m    v1.20.0+ba45583 user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal   Ready    infra,worker   4h6m    v1.20.0+ba45583 -user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal   Ready    infra,worker   4h6m    v1.20.0+ba45583 +user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal   Ready    infra,worker   4h6m    v1.20.0+ba4558 ---- -With an infra node being assigned as a worker, there is a chance that user workloads could get inadvertently assigned to an infra node. To avoid this, you can apply a taint to the infra node and then add tolerations for the pods you want to control. - +. 
When a worker node is assigned the `infra` role, there is a chance that user workloads could get inadvertently assigned to an infra node. To avoid this, you can apply a taint to the infra node, and then add tolerations for the pods that you want to control. For example: ++ +[source,terminal] ---- $ oc adm taint nodes user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal node-role.kubernetes.io/infra:NoSchedule +---- ++ +[source,terminal] +---- $ oc adm taint nodes user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal node-role.kubernetes.io/infra:NoSchedule +---- ++ +[source,terminal] +---- $ oc adm taint nodes user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal node-role.kubernetes.io/infra:NoSchedule ---- -== Create a Project with node selector and toleration +[id="creating-project-node-selector-toleration"] +== Creating a project with node selector and tolerations -If you have already deployed Quay using the Quay Operator, remove the installed operator and any specific namespace(s) you created for the deployment. +Use the following procedure to create a project with node selector and tolerations. -Create a Project resource, specifying a node selector and toleration as shown in the following example: +[NOTE] +==== +The following procedure can also be completed by removing the installed {productname} Operator and the namespace, or namespaces, used when creating the deployment. Users can then create a new resource with the following annotation. +==== -.quay-registry.yaml +.Procedure + +. 
Enter the following command to edit the namespace where {productname} is deployed, and the following annotation: ++ +[source,terminal] ---- -kind: Project -apiVersion: project.openshift.io/v1 -metadata: -  name: quay-registry -  annotations: -    openshift.io/node-selector: 'node-role.kubernetes.io/infra=' -    scheduler.alpha.kubernetes.io/defaultTolerations: >- -      [{"operator": "Exists", "effect": "NoSchedule", "key": -      "node-role.kubernetes.io/infra"} -      ] +$ oc annotate namespace openshift.io/node-selector='node-role.kubernetes.io/infra=' +---- ++ +Example output ++ +[source,yaml] +---- +namespace/ annotated ---- -Use the `oc apply` command to create the project: - +. Obtain a list of available pods by entering the following command: ++ +[source,terminal] ---- -$ oc apply -f quay-registry.yaml -project.project.openshift.io/quay-registry created +$ oc get pods -o wide ---- ++ +.Example output ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +example-registry-clair-app-5744dd64c9-9d5jt 1/1 Running 0 173m 10.130.4.13 stevsmit-quay-ocp-tes-5gwws-worker-c-6xkn7 +example-registry-clair-app-5744dd64c9-fg86n 1/1 Running 6 (3h21m ago) 3h24m 10.131.0.91 stevsmit-quay-ocp-tes-5gwws-worker-c-dnhdp +example-registry-clair-postgres-845b47cd88-vdchz 1/1 Running 0 3h21m 10.130.4.10 stevsmit-quay-ocp-tes-5gwws-worker-c-6xkn7 +example-registry-quay-app-64cbc5bcf-8zvgc 1/1 Running 1 (3h24m ago) 3h24m 10.130.2.12 stevsmit-quay-ocp-tes-5gwws-worker-a-tk8dx +example-registry-quay-app-64cbc5bcf-pvlz6 1/1 Running 0 3h24m 10.129.4.10 stevsmit-quay-ocp-tes-5gwws-worker-b-fjhz4 +example-registry-quay-app-upgrade-8gspn 0/1 Completed 0 3h24m 10.130.2.10 stevsmit-quay-ocp-tes-5gwws-worker-a-tk8dx +example-registry-quay-database-784d78b6f8-2vkml 1/1 Running 0 3h24m 10.131.4.10 stevsmit-quay-ocp-tes-5gwws-worker-c-2frtg +example-registry-quay-mirror-d5874d8dc-fmknp 1/1 Running 0 3h24m 10.129.4.9 
stevsmit-quay-ocp-tes-5gwws-worker-b-fjhz4 +example-registry-quay-mirror-d5874d8dc-t4mff 1/1 Running 0 3h24m 10.129.2.19 stevsmit-quay-ocp-tes-5gwws-worker-a-k7w86 +example-registry-quay-redis-79848898cb-6qf5x 1/1 Running 0 3h24m 10.130.2.11 stevsmit-quay-ocp-tes-5gwws-worker-a-tk8dx -Any subsequent resources created in the `quay-registry` namespace should now be scheduled on the dedicated infrastructure nodes. +---- +. Enter the following command to delete the available pods: ++ +[source,terminal] +---- +$ oc delete pods --selector quay-operator/quayregistry=example-registry -n quay-enterprise +---- ++ +Example output ++ +[source,terminal] +---- +pod "example-registry-clair-app-5744dd64c9-9d5jt" deleted +pod "example-registry-clair-app-5744dd64c9-fg86n" deleted +pod "example-registry-clair-postgres-845b47cd88-vdchz" deleted +pod "example-registry-quay-app-64cbc5bcf-8zvgc" deleted +pod "example-registry-quay-app-64cbc5bcf-pvlz6" deleted +pod "example-registry-quay-app-upgrade-8gspn" deleted +pod "example-registry-quay-database-784d78b6f8-2vkml" deleted +pod "example-registry-quay-mirror-d5874d8dc-fmknp" deleted +pod "example-registry-quay-mirror-d5874d8dc-t4mff" deleted +pod "example-registry-quay-redis-79848898cb-6qf5x" deleted +---- ++ +After the pods have been deleted, they automatically cycle back up and should be scheduled on the dedicated infrastructure nodes. -== Install the Quay Operator in the namespace +//// +. Enter the following command to create the project on infra nodes: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- ++ +.Example output ++ +[source,terminal] +---- +project.project.openshift.io/quay-registry created +---- ++ +Subsequent resources created in the `` namespace should now be scheduled on the dedicated infrastructure nodes. 
+//// + +[id="installing-quay-operator-namespace"] +== Installing {productname-ocp} on a specific namespace -When installing the Quay Operator, specify the appropriate project namespace explicitly, in this case `quay-registry`. This will result in the operator pod itself landing on one of the three infrastructure nodes: +Use the following procedure to install {productname-ocp} in a specific namespace. +* To install the {productname} Operator in a specific namespace, you must explicitly specify the appropriate project namespace, as in the following command. ++ +In the following example, the `quay-registry` namespace is used. This results in the `quay-operator` pod landing on one of the three infrastructure nodes. For example: ++ +[source,terminal] ---- $ oc get pods -n quay-registry -o wide +---- ++ +.Example output ++ +[source,terminal] +---- NAME                                    READY   STATUS    RESTARTS   AGE   IP            NODE                                               quay-operator.v3.4.1-6f6597d8d8-bd4dp   1/1     Running   0          30s   10.131.0.16   user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal ---- -== Create the registry +[id="creating-registry"] +== Creating the {productname} registry -Create the registry as explained earlier, and then wait for the deployment to be ready. When you list the Quay pods, you should now see that they have only been scheduled on the three nodes that you have labelled for infrastructure purposes: +Use the following procedure to create the {productname} registry. +* Enter the following command to create the {productname} registry. Then, wait for the deployment to be marked as `ready`. In the following example, you should see that they have only been scheduled on the three nodes that you have labelled for infrastructure purposes. 
++ +[source,terminal] ---- $ oc get pods -n quay-registry -o wide +---- ++ +.Example output ++ +[source,terminal] +---- NAME                                                   READY   STATUS      RESTARTS   AGE     IP            NODE                                                 example-registry-clair-app-789d6d984d-gpbwd            1/1     Running     1          5m57s   10.130.2.80   user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal example-registry-clair-postgres-7c8697f5-zkzht         1/1     Running     0          4m53s   10.129.2.19   user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal example-registry-quay-app-56dd755b6d-glbf7             1/1     Running     1          5m57s   10.129.2.17   user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal -example-registry-quay-config-editor-7bf9bccc7b-dpc6d   1/1     Running     0          5m57s   10.131.0.23   user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal example-registry-quay-database-8dc7cfd69-dr2cc         1/1     Running     0          5m43s   10.129.2.18   user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal example-registry-quay-mirror-78df886bcc-v75p9          1/1     Running     0          5m16s   10.131.0.24   user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal example-registry-quay-postgres-init-8s8g9              0/1     Completed   0          5m54s   10.130.2.79   user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal diff --git a/modules/operator-deploy-ui.adoc b/modules/operator-deploy-ui.adoc index f8c922bd4..c3ebe8269 100644 --- a/modules/operator-deploy-ui.adoc +++ b/modules/operator-deploy-ui.adoc @@ -1,10 +1,17 @@ -[[operator-deploy-ui]] -= Deploying {productname} from the OpenShift console +:_content-type: PROCEDURE +[id="operator-deploy-ui"] += Deploying {productname} from the {ocp} console . Create a namespace, for example, `quay-enterprise`. -. Select Operators -> Installed Operators, then select the Quay Operator to navigate to the Operator detail view. + +. 
Select *Operators* -> *Installed Operators*, then select the Quay Operator to navigate to the Operator detail view. + . Click 'Create Instance' on the 'Quay Registry' tile under 'Provided APIs'. + . Optionally change the 'Name' of the `QuayRegistry`. This will affect the hostname of the registry. All other fields have been populated with defaults. + . Click 'Create' to submit the `QuayRegistry` to be deployed by the Quay Operator. + . You should be redirected to the `QuayRegistry` list view. Click on the `QuayRegistry` you just created to see the details view. + . Once the 'Registry Endpoint' has a value, click it to access your new Quay registry via the UI. You can now select 'Create Account' to create a user and sign in. diff --git a/modules/operator-deploy-view-pods-cli.adoc b/modules/operator-deploy-view-pods-cli.adoc index b3a86e12b..dbcd133b2 100644 --- a/modules/operator-deploy-view-pods-cli.adoc +++ b/modules/operator-deploy-view-pods-cli.adoc @@ -1,11 +1,26 @@ -[[operator-deploy-view-pods-cli]] +:_content-type: PROCEDURE +[id="operator-deploy-view-pods-cli"] = Viewing created components using the command line -Use the `oc get pods` command to view the deployed components: +Use the following procedure to view deployed {productname} components. -``` -$ oc get pods -n quay-enterprise +.Prerequisites + +* You have deployed {productname-ocp}. + +.Procedure +. 
Enter the following command to view the deployed components: ++ +[source,terminal] +---- +$ oc get pods -n quay-enterprise +---- ++ +.Example output ++ +[source,terminal] +---- NAME READY STATUS RESTARTS AGE example-registry-clair-app-5ffc9f77d6-jwr9s 1/1 Running 0 3m42s example-registry-clair-app-5ffc9f77d6-wgp7d 1/1 Running 0 3m41s @@ -13,11 +28,9 @@ example-registry-clair-postgres-54956d6d9c-rgs8l 1/1 Running 0 example-registry-quay-app-79c6b86c7b-8qnr2 1/1 Running 4 3m42s example-registry-quay-app-79c6b86c7b-xk85f 1/1 Running 4 3m41s example-registry-quay-app-upgrade-5kl5r 0/1 Completed 4 3m50s -example-registry-quay-config-editor-597b47c995-svqrl 1/1 Running 0 3m42s example-registry-quay-database-b466fc4d7-tfrnx 1/1 Running 2 3m42s example-registry-quay-mirror-6d9bd78756-6lj6p 1/1 Running 0 2m58s example-registry-quay-mirror-6d9bd78756-bv6gq 1/1 Running 0 2m58s example-registry-quay-postgres-init-dzbmx 0/1 Completed 0 3m43s example-registry-quay-redis-8bd67b647-skgqx 1/1 Running 0 3m42s -``` - +---- \ No newline at end of file diff --git a/modules/operator-deploy.adoc b/modules/operator-deploy.adoc index 38218d505..26144abe0 100644 --- a/modules/operator-deploy.adoc +++ b/modules/operator-deploy.adoc @@ -1,7 +1,8 @@ -[[operator-deploy]] -= Deploying Quay using the Quay Operator +:_content-type: REFERENCE +[id="operator-deploy"] += Deploying {productname} using the Operator -The Operator can be deployed from the command line or from the OpenShift console, but the fundamental steps are the same. +{productname-ocp} can be deployed using the command-line interface or from the {ocp} console. The steps are fundamentally the same. 
diff --git a/modules/operator-differences.adoc b/modules/operator-differences.adoc index 8f23dba7e..1122563d3 100644 --- a/modules/operator-differences.adoc +++ b/modules/operator-differences.adoc @@ -1,15 +1,28 @@ -[[operator-differences]] +:_content-type: CONCEPT +[id="operator-differences"] //= Differences from Earlier Versions -As of {productname} 3.4.0, the Operator has been completely re-written to provide an improved out of the box experience as well as support for more Day 2 operations. As a result the new Operator is simpler to use and is more opinionated. The key differences from earlier versions of the Operator are: +With the release of {productname} 3.4.0, the {productname} Operator was re-written to offer an enhanced experience and to add more support for Day 2 operations. As a result, the {productname} Operator is now simpler to use and is more opinionated. The key differences from versions prior to {productname} 3.4.0 include the following: + +* The `QuayEcosystem` custom resource has been replaced with the `QuayRegistry` custom resource. +* The default installation options produce a fully supported {productname} environment, with all managed dependencies, such as database, caches, object storage, and so on, supported for production use. ++ +[NOTE] +==== +Some components might not be highly available. +==== + +* A new validation library for {productname}'s configuration. 
-* The `QuayEcosystem` custom resource has been replaced with the `QuayRegistry` custom resource -* The default installation options produces a fully supported Quay environment with all managed dependencies (database, caches, object storage, etc) supported for production use (some components may not be highly available) -* A new robust validation library for Quay's configuration which is shared by the Quay application and config tool for consistency ifeval::["{productname}" == "Red Hat Quay"] -* Object storage can now be managed by the Operator using the `ObjectBucketClaim` Kubernetes API (Red Hat OpenShift Data Foundation can be used to provide a supported implementation of this API on OpenShift) +* Object storage can now be managed by the {productname} Operator using the `ObjectBucketClaim` Kubernetes API ++ +[NOTE] +==== +Red Hat OpenShift Data Foundation can be used to provide a supported implementation of this API on {ocp}. +==== endif::[] ifeval::["{productname}" == "Project Quay"] -* Object storage can now be provided by the Operator using the `ObjectBucketClaim` Kubernetes API (e.g. the NooBaa Operator can be from OperatorHub.io can be used to provide an implementation of that API) +* Object storage can now be provided by the {productname} Operator using the `ObjectBucketClaim` Kubernetes API. For example, the NooBaa Operator from `OperatorHub.io` can be used to provide an implementation of that API. endif::[] -* Customization of the container images used by deployed pods for testing and development scenarios \ No newline at end of file +* Customization of the container images used by deployed pods for testing and development scenarios. 
\ No newline at end of file diff --git a/modules/operator-external-access.adoc b/modules/operator-external-access.adoc index 5ed00e5f1..156e054c2 100644 --- a/modules/operator-external-access.adoc +++ b/modules/operator-external-access.adoc @@ -1,8 +1,7 @@ -[[operator-external-access]] +[id="operator-external-access"] = External Access to the Registry - -When running on OpenShift, the `Routes` API is available and will automatically be used as a managed component. After creating the `QuayRegistry`, the external access point can be found in the status block of the `QuayRegistry`: +When running on {ocp}, the `Routes` API is available and is automatically used as a managed component. After creating the `QuayRegistry` object, the external access point can be found in the status block of the `QuayRegistry` object. For example: [source,yaml] ---- @@ -56,7 +55,7 @@ spec: configBundleSecret: my-config-bundle ---- -== Using OpenShift Provided TLS Certificate +== Using OpenShift Provided TLS Certificate It is preferred to have TLS terminated in the Quay app container. Therefore, to use the OpenShift provided TLS, you must create a `Route` with type "reencrypt", which will use the OpenShift provided TLS at the edge, and Quay Operator-generated TLS within the cluster. This is achieved by marking the `route` component as unmanaged, and creating your own `Route` which link:https://docs.openshift.com/container-platform/4.7/networking/routes/secured-routes.html[reencrypts TLS] using the Operator-generated CA certificate. 
@@ -99,7 +98,7 @@ metadata: name: registry namespace: spec: - to: + to: kind: Service name: tls: diff --git a/modules/operator-first-user-ui.adoc b/modules/operator-first-user-ui.adoc index bcf9ebd32..6848e004d 100644 --- a/modules/operator-first-user-ui.adoc +++ b/modules/operator-first-user-ui.adoc @@ -1,22 +1,32 @@ -[[operator-first-user]] -= Using the Quay UI to create the first user +:_content-type: PROCEDURE +[id="operator-first-user"] += Using the {productname} UI to create the first user + +Use the following procedure to create the first user by the {productname} UI. [NOTE] ==== -This procedure assumes that the `FEATURE_USER_CREATION` config option has not been set to `false.` If it is `false`, then the `Create Account` functionality on the UI will be disabled, and you will have to use the API to create the first user. +This procedure assumes that the `FEATURE_USER_CREATION` config option has not been set to `false.` If it is `false`, the `Create Account` functionality on the UI will be disabled, and you will have to use the API to create the first user. ==== -. In the OpenShift console, navigate to Operators -> Installed Operators, with the appropriate namespace / project. -. Click on the newly installed QuayRegistry, to view the details: +.Procedure + +. In the {ocp} console, navigate to *Operators* -> *Installed Operators*, with the appropriate namespace / project. + +. Click on the newly installed `QuayRegistry` object to view the details. For example: + image:config-editor-details-operator-36.png[QuayRegistry details] -. Once the `Registry Endpoint` has a value, navigate to this URL in your browser -. Select 'Create Account' in the Quay registry UI to create a user + +. After the `Registry Endpoint` has a value, navigate to this URL in your browser. + +. Select *Create Account* in the {productname} registry UI to create a user. For example: + image:create-account-1.png[Create Account] -. 
Enter details for username, password, email and click `Create Account` + +. Enter the details for *Username*, *Password*, *Email*, and then click *Create Account*. For example: + image:create-account-2.png[Enter account details] -. You are automatically logged in to the Quay registry -+ + +After creating the first user, you are automatically logged in to the {productname} registry. For example: + image:create-account-3.png[Initial log in] \ No newline at end of file diff --git a/modules/operator-geo-replication.adoc b/modules/operator-geo-replication.adoc new file mode 100644 index 000000000..11f50bc5e --- /dev/null +++ b/modules/operator-geo-replication.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="operator-geo-replication-faq"] += Does the {productname} Operator support geo-replication? + +As of {productname} 3.7 and later, the {productname} Operator supports geo-replication deployments. Earlier versions of {productname} do not support geo-replication. This feature will not be backported to earlier versions of {productname}. You must upgrade {productname} to 3.7 or later to use the geo-replication feature. \ No newline at end of file diff --git a/modules/operator-georepl-site-removal.adoc b/modules/operator-georepl-site-removal.adoc new file mode 100644 index 000000000..e90d436f1 --- /dev/null +++ b/modules/operator-georepl-site-removal.adoc @@ -0,0 +1,79 @@ +:_content-type: PROCEDURE +[id="operator-georepl-site-removal"] += Removing a geo-replicated site from your {productname-ocp} deployment + +By using the following procedure, {productname} administrators can remove sites in a geo-replicated setup. + +.Prerequisites + +* You are logged into {ocp}. +* You have configured {productname} geo-replication with at least two sites, for example, `usstorage` and `eustorage`. +* Each site has its own Organization, Repository, and image tags. + +.Procedure + +. 
Sync the blobs between all of your defined sites by running the following command: ++ +[source,terminal] +---- +$ python -m util.backfillreplication +---- ++ +[WARNING] +==== +Prior to removing storage engines from your {productname} `config.yaml` file, you *must* ensure that all blobs are synced between all defined sites. + +When running this command, replication jobs are created which are picked up by the replication worker. If there are blobs that need replicated, the script returns UUIDs of blobs that will be replicated. If you run this command multiple times, and the output from the return script is empty, it does not mean that the replication process is done; it means that there are no more blobs to be queued for replication. Customers should use appropriate judgement before proceeding, as the allotted time replication takes depends on the number of blobs detected. + +Alternatively, you could use a third party cloud tool, such as Microsoft Azure, to check the synchronization status. + +This step must be completed before proceeding. +==== + +. In your {productname} `config.yaml` file for site `usstorage`, remove the `DISTRIBUTED_STORAGE_CONFIG` entry for the `eustorage` site. + +. Enter the following command to identify your `Quay` application pods: ++ +[source,terminal] +---- +$ oc get pod -n +---- ++ +.Example output ++ +[source,terminal] +---- +quay390usstorage-quay-app-5779ddc886-2drh2 +quay390eustorage-quay-app-66969cd859-n2ssm +---- + +. Enter the following command to open an interactive shell session in the `usstorage` pod: ++ +[source,terminal] +---- +$ oc rsh quay390usstorage-quay-app-5779ddc886-2drh2 +---- + +. Enter the following command to permanently remove the `eustorage` site: ++ +[IMPORTANT] +==== +The following action cannot be undone. Use with caution. +==== ++ +[source,terminal] +---- +sh-4.4$ python -m util.removelocation eustorage +---- ++ +.Example output ++ +[source,terminal] +---- +WARNING: This is a destructive operation. 
Are you sure you want to remove eustorage from your storage locations? [y/n] y +Deleted placement 30 +Deleted placement 31 +Deleted placement 32 +Deleted placement 33 +Deleted location eustorage +---- \ No newline at end of file diff --git a/modules/operator-helm-oci.adoc b/modules/operator-helm-oci.adoc index d775452f1..819a9cd7f 100644 --- a/modules/operator-helm-oci.adoc +++ b/modules/operator-helm-oci.adoc @@ -1,17 +1,18 @@ -[[operator-helm-oci]] -= Configuring OCI and Helm with the Operator +[id="operator-helm-oci"] += Enabling OCI support with the {productname} Operator -Customizations to the configuration of Quay can be provided in a secret containing the configuration bundle. Execute the following command which will create a new secret called `quay-config-bundle`, in the appropriate namespace, containing the necessary properties to enable OCI support. +Use the following procedure to configure Open Container Initiative (OCI) support for {productname}. +.Procedure -.quay-config-bundle.yaml +. Create a `quay-config-bundle` YAML file that includes the following information: ++ [source,yaml] ---- apiVersion: v1 stringData: config.yaml: | FEATURE_GENERAL_OCI_SUPPORT: true - FEATURE_HELM_OCI_SUPPORT: true kind: Secret metadata: name: quay-config-bundle @@ -19,21 +20,15 @@ metadata: type: Opaque ---- -[IMPORTANT] -==== -As of {productname} {producty}, `FEATURE_HELM_OCI_SUPPORT` has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support. -==== - -Create the secret in the appropriate namespace, in this example `quay-enterprise`: - +. Enter the following command to create the `quay-config-bundle` object in the appropriate namespace, passing in the necessary properties to enable OCI support. 
For example: ++ +[source,terminal] ---- $ oc create -n quay-enterprise -f quay-config-bundle.yaml ---- - -Specify the secret for the `spec.configBundleSecret` field: - -.quay-registry.yaml +. In your `quay-registry.yaml` file, reference the secret for the `spec.configBundleSecret` field. For example: ++ [source,yaml] ---- apiVersion: quay.redhat.com/v1 @@ -45,9 +40,7 @@ spec: configBundleSecret: quay-config-bundle ---- +[role="_additional-resources"] +.Additional resources -Create the registry with the specified configuration: - ----- -$ oc create -n quay-enterprise -f quay-registry.yaml ----- +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/oci-intro#doc-wrapper[OCI Support and {productname}] \ No newline at end of file diff --git a/modules/operator-install.adoc b/modules/operator-install.adoc index 650ebde73..a01e42c73 100644 --- a/modules/operator-install.adoc +++ b/modules/operator-install.adoc @@ -1,26 +1,25 @@ -= Installing the Quay Operator from OperatorHub +:_content-type: PROCEDURE [id="operator-install"] += Installing the {productname} Operator from the OperatorHub +Use the following procedure to install the {productname} Operator from the {ocp} OperatorHub. +.Procedure +. Using the {ocp} console, select *Operators* -> *OperatorHub*. + +. In the search box, type *{productname}* and select the official {productname} Operator provided by Red Hat. This directs you to the *Installation* page, which outlines the features, prerequisites, and deployment information. + +. Select *Install*. This directs you to the *Operator Installation* page. -. Using the OpenShift console, Select Operators -> OperatorHub, then select the {productname} Operator. If there is more than one, be sure to use the Red Hat certified Operator and not the community version. -+ -image:operatorhub-quay.png[] -. The Installation page outlines the features and prerequisites: -+ -image:operator-install-page.png[] -. 
Select Install. The Operator Installation page appears. -+ -image:operator-subscription.png[] . The following choices are available for customizing the installation: -* **Update Channel:** Choose the update channel, for example, `stable-3.7` for the latest release. +.. **Update Channel:** Choose the update channel, for example, `stable-{producty}` for the latest release. -* **Installation Mode:** Choose `All namespaces on the cluster` if you want the Operator to be available cluster-wide. Choose `A specific namespace on the cluster` if you want it deployed only within a single namespace. It is recommended that you install the Operator cluster-wide. If you choose a single namespace, the monitoring component will not be available by default. +.. **Installation Mode:** +... Choose `All namespaces on the cluster` if you want the {productname} Operator to be available cluster-wide. It is recommended that you install the {productname} Operator cluster-wide. If you choose a single namespace, the monitoring component will not be available by default. +... Choose `A specific namespace on the cluster` if you want it deployed only within a single namespace. * **Approval Strategy:** Choose to approve either automatic or manual updates. Automatic update strategy is recommended. -. Select Install. - -. After a short time, you will see the Operator installed successfully in the Installed Operators page. +. Select *Install*. \ No newline at end of file diff --git a/modules/operator-ipv6-dual-stack.adoc b/modules/operator-ipv6-dual-stack.adoc index 7ed74b127..c213e116a 100644 --- a/modules/operator-ipv6-dual-stack.adoc +++ b/modules/operator-ipv6-dual-stack.adoc @@ -1,19 +1,24 @@ :_content-type: CONCEPT [id="operator-ipv6-dual-stack"] -= Deploying IPv6 on the {productname} Operator += Deploying IPv6 on {productname-ocp} -Your {productname} Operator deployment can now be served in locations that only support IPv6, such as Telco and Edge environments. 
+[NOTE] +==== +Currently, deploying IPv6 on the {productname-ocp} is not supported on IBM Power and IBM Z. +==== + +Your {productname-ocp} deployment can now be served in locations that only support IPv6, such as Telco and Edge environments. For a list of known limitations, see xref:operator-ipv6-limitations-38[IPv6 limitations] [id="proc-manage-enabling-ipv6"] == Enabling the IPv6 protocol family -Use the following procedure to enable IPv6 support on your standalone {productname} deployment. +Use the following procedure to enable IPv6 support on your {productname} deployment. .Prerequisites -* You have updated {productname} to 3.8. +* You have updated {productname} to at least version 3.8. * Your host and container software platform (Docker, Podman) must be configured to support IPv6. .Procedure @@ -22,13 +27,13 @@ Use the following procedure to enable IPv6 support on your standalone {productna + [source,yaml] ---- ---- +# ... FEATURE_GOOGLE_LOGIN: false FEATURE_INVITE_ONLY_USER_CREATION: false FEATURE_LISTEN_IP_VERSION: IPv6 FEATURE_MAILING: false FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false ---- +# ... ---- . Start, or restart, your {productname} deployment. @@ -51,14 +56,10 @@ If your environment is configured to IPv4, but the `FEATURE_LISTEN_IP_VERSION` c [id="operator-ipv6-limitations-38"] == IPv6 limitations -* Currently, attempting to configure your {productname} deployment with the common Azure Blob Storage configuration will not work on IPv6 single stack environments. Because the endpoint of Azure Blob Storage does not support IPv6, there is no workaround in place for this issue. +* Currently, attempting to configure your {productname} deployment with the common Microsoft Azure Blob Storage configuration will not work on IPv6 single stack environments. Because the endpoint of Microsoft Azure Blob Storage does not support IPv6, there is no workaround in place for this issue. 
+ For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4433[PROJQUAY-4433]. * Currently, attempting to configure your {productname} deployment with Amazon S3 CloudFront will not work on IPv6 single stack environments. Because the endpoint of Amazon S3 CloudFront does not support IPv6, there is no workaround in place for this issue. + -For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470]. - -* Currently, OpenShift Data Foundations (ODF) is unsupported when {productname} is deployed on IPv6 single stack environments. As a result, ODF cannot be used in IPv6 environments. This limitation is scheduled to be fixed in a future version of OpenShift Data Foundations. - -* Currently, dual-stack (IPv4 and IPv6) support does not work on {productname} {ocp} deployments. When {productname} 3.8 is deployed on {ocp} with dual-stack support enabled, the Quay Route generated by the {productname} Operator only generates an IPv4 address, and not an IPv6 address. As a result, clients with an IPv6 address cannot access the {productname} application on {ocp}. This limitation is scheduled to be fixed in a future version of {ocp}. \ No newline at end of file +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470]. \ No newline at end of file diff --git a/modules/operator-managed-postgres.adoc b/modules/operator-managed-postgres.adoc index 7a3d3410f..ba294cc21 100644 --- a/modules/operator-managed-postgres.adoc +++ b/modules/operator-managed-postgres.adoc @@ -1,8 +1,34 @@ -[[operator-managed-postgres]] -= Using the managed PostgreSQL +:_content-type: CONCEPT +[id="operator-managed-postgres"] += Using the managed PostgreSQL database -Recommendations: +With {productname} 3.9, if your database is managed by the {productname} Operator, updating from {productname} 3.8 -> 3.9 automatically handles upgrading PostgreSQL 10 to PostgreSQL 13. 
-* Database backups should be performed regularly using either the supplied tools on the Postgres image or your own backup infrastructure. The Operator does not currently ensure the Postgres database is backed up. -* Restoring the Postgres database from a backup must be done using Postgres tools and procedures. Be aware that your Quay `Pods` should not be running while the database restore is in progress. -* Database disk space is allocated automatically by the Operator with 50 GiB. This number represents a usable amount of storage for most small to medium {productname} installations but may not be sufficient for your use cases. Resizing the database volume is currently not handled by the Operator. \ No newline at end of file +[IMPORTANT] +==== +* Users with a managed database are required to upgrade their PostgreSQL database from 10 -> 13. +* If your {productname} and Clair databases are managed by the Operator, the database upgrades for each component must succeed for the 3.9.0 upgrade to be successful. If either of the database upgrades fail, the entire {productname} version upgrade fails. This behavior is expected. +==== + +If you do not want the {productname} Operator to upgrade your PostgreSQL deployment from PostgreSQL 10 -> 13, you must set the PostgreSQL parameter to `managed: false` in your `quayregistry.yaml` file. For more information about setting your database to unmanaged, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#operator-unmanaged-postgres[Using an existing Postgres database]. + +[IMPORTANT] +==== +* It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. 
+ +==== + +If you want your PostgreSQL database to match the same version as your {rhel} system, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/deploying_different_types_of_servers/using-databases#migrating-to-a-rhel-8-version-of-postgresql_using-postgresql[Migrating to a RHEL 8 version of PostgreSQL] for {rhel-short} 8 or link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_using_database_servers/using-postgresql_configuring-and-using-database-servers#migrating-to-a-rhel-9-version-of-postgresql_using-postgresql[Migrating to a RHEL 9 version of PostgreSQL] for {rhel-short} 9. + +For more information about the {productname} 3.8 -> 3.9 procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/upgrade_red_hat_quay/index#operator-upgrade[Upgrading the {productname} Operator overview]. + +[id="operator-managed-postgres-recommendations"] +== PostgreSQL database recommendations + +The {productname} team recommends the following for managing your PostgreSQL database. + +* Database backups should be performed regularly using either the supplied tools on the PostgreSQL image or your own backup infrastructure. The {productname} Operator does not currently ensure that the PostgreSQL database is backed up. + +* Restoring the PostgreSQL database from a backup must be done using PostgreSQL tools and procedures. Be aware that your `Quay` pods should not be running while the database restore is in progress. + +* Database disk space is allocated automatically by the {productname} Operator with 50 GiB. This number represents a usable amount of storage for most small to medium {productname} installations but might not be sufficient for your use cases. Resizing the database volume is currently not handled by the {productname} Operator. 
\ No newline at end of file diff --git a/modules/operator-managed-storage.adoc b/modules/operator-managed-storage.adoc index d70b0cb42..072beb0b0 100644 --- a/modules/operator-managed-storage.adoc +++ b/modules/operator-managed-storage.adoc @@ -1,26 +1,27 @@ -[[operator-managed-storage]] +:_content-type: CONCEPT +[id="operator-managed-storage"] = Managed storage ifeval::["{productname}" == "Red Hat Quay"] -If you want the Operator to manage object storage for Quay, your cluster needs to be capable of providing object storage via the `ObjectBucketClaim` API. Using the Red Hat OpenShift Data Foundation (ODF) Operator, there are two supported options available: +If you want the {productname} Operator to manage object storage for {productname}, your cluster needs to be capable of providing object storage through the `ObjectBucketClaim` API. Using the {odf} Operator, there are two supported options available: -* A standalone instance of the Multi-Cloud Object Gateway backed by a local Kubernetes `PersistentVolume` storage -** Not highly available -** Included in the Quay subscription -** Does not require a separate subscription for ODF -* A production deployment of ODF with scale-out Object Service and Ceph +* A standalone instance of the Multi-Cloud Object Gateway backed by a local Kubernetes `PersistentVolume` storage +** Not highly available +** Included in the {productname} subscription +** Does not require a separate subscription for {odf} +* A production deployment of {odf} with scale-out Object Service and Ceph ** Highly available -** Requires a separate subscription for ODF +** Requires a separate subscription for {odf} -To use the standalone instance option, continue reading below. For production deployment of ODF, please refer to the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/[official documentation]. +To use the standalone instance option, continue reading below. 
For production deployment of {odf}, please refer to the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/[official documentation]. endif::[] ifeval::["{productname}" == "Project Quay"] -If you want the Operator to manage object storage for Quay, your cluster needs to be capable of providing it via the `ObjectBucketClaim` API. There are multiple implementations of this API available, for instance, link:https://operatorhub.io/operator/noobaa-operator[NooBaa] in combination with Kubernetes `PersistentVolumes` or scalable storage backends like Ceph. Refer to the link:https://github.com/noobaa/noobaa-core[NooBaa documentation] for more details on how to deploy this component. +If you want the Operator to manage object storage for {productname}, your cluster needs to be capable of providing it through the `ObjectBucketClaim` API. There are multiple implementations of this API available, for instance, link:https://operatorhub.io/operator/noobaa-operator[NooBaa] in combination with Kubernetes `PersistentVolumes` or scalable storage backends like Ceph. Refer to the link:https://github.com/noobaa/noobaa-core[NooBaa documentation] for more details on how to deploy this component. endif::[] [NOTE] ==== -Object storage disk space is allocated automatically by the Operator with 50 GiB. This number represents a usable amount of storage for most small to medium {productname} installations but may not be sufficient for your use cases. Resizing the RHOCS volume is currently not handled by the Operator. See the section below on resizing managed storage for more details. +Object storage disk space is allocated automatically by the {productname} Operator with 50 GiB. This number represents a usable amount of storage for most small to medium {productname} installations but might not be sufficient for your use cases. Resizing the {odf} volume is currently not handled by the {productname} Operator. 
See the section below about resizing managed storage for more details. ==== \ No newline at end of file diff --git a/modules/operator-monitor-deploy-cli.adoc b/modules/operator-monitor-deploy-cli.adoc index 71b68a77c..b9338e120 100644 --- a/modules/operator-monitor-deploy-cli.adoc +++ b/modules/operator-monitor-deploy-cli.adoc @@ -1,15 +1,22 @@ -[[operator-monitor-deploy-cli]] +:_content-type: PROCEDURE +[id="operator-monitor-deploy-cli"] = Monitoring and debugging the deployment process -Users can now troubleshoot problems during the deployment phase. The status in the `QuayRegistry` object can help you monitor the health of the components during the deployment an help you debug any problems that may arise: +Users can now troubleshoot problems during the deployment phase. The status in the `QuayRegistry` object can help you monitor the health of the components during the deployment an help you debug any problems that may arise. +.Procedure -``` +. Enter the following command to check the status of your deployment: ++ +[source,terminal] +---- $ oc get quayregistry -n quay-enterprise -o yaml -``` - -Immediately after deployment, the QuayRegistry object will show the basic configuration: - +---- ++ +.Example output ++ +Immediately after deployment, the `QuayRegistry` object will show the basic configuration: ++ [source,yaml] ---- apiVersion: v1 @@ -44,6 +51,8 @@ items: managed: true - kind: tls managed: true + - kind: clairpostgres + managed: true configBundleSecret: example-registry-config-bundle-kt55s kind: List metadata: @@ -51,30 +60,32 @@ metadata: selfLink: "" ---- - -Use the `oc get pods` command to view the current state of the deployed components: - -``` +. 
Use the `oc get pods` command to view the current state of the deployed components: ++ +[source,terminal] +---- $ oc get pods -n quay-enterprise - +---- ++ +.Example output ++ +[source,terminal] +---- NAME READY STATUS RESTARTS AGE example-registry-clair-app-86554c6b49-ds7bl 0/1 ContainerCreating 0 2s example-registry-clair-app-86554c6b49-hxp5s 0/1 Running 1 17s example-registry-clair-postgres-68d8857899-lbc5n 0/1 ContainerCreating 0 17s example-registry-quay-app-upgrade-h2v7h 0/1 ContainerCreating 0 9s -example-registry-quay-config-editor-5f646cbcb7-lbnc2 0/1 ContainerCreating 0 17s example-registry-quay-database-66f495c9bc-wqsjf 0/1 ContainerCreating 0 17s example-registry-quay-mirror-854c88457b-d845g 0/1 Init:0/1 0 2s example-registry-quay-mirror-854c88457b-fghxv 0/1 Init:0/1 0 17s example-registry-quay-postgres-init-bktdt 0/1 Terminating 0 17s example-registry-quay-redis-f9b9d44bf-4htpz 0/1 ContainerCreating 0 17s -``` - - -While the deployment is in progress, the QuayRegistry object will show the current status. In this instance, database migrations are taking place, and other components are waiting until this completes. - +---- -[source,yaml] +. While the deployment is in progress, the `QuayRegistry` object will show the current status. 
In this instance, database migrations are taking place, and other components are waiting until completion: ++ +[source,terminal] ---- status: conditions: @@ -90,8 +101,6 @@ While the deployment is in progress, the QuayRegistry object will show the curre reason: MigrationsInProgress status: "False" type: Available - configEditorCredentialsSecret: example-registry-quay-config-editor-credentials-btbkcg8dc9 - configEditorEndpoint: https://example-registry-quay-config-editor-quay-enterprise.apps.docs.quayteam.org lastUpdated: 2021-09-14 10:52:05.371425635 +0000 UTC unhealthyComponents: clair: @@ -116,9 +125,9 @@ While the deployment is in progress, the QuayRegistry object will show the curre type: Available ---- -When the deployment process finishes successfully, the status in the QuayRegistry object shows no unhealthy components: - -[source,yaml] +. When the deployment process finishes successfully, the status in the `QuayRegistry` object shows no unhealthy components: ++ +[source,terminal] ---- status: conditions: @@ -134,10 +143,8 @@ When the deployment process finishes successfully, the status in the QuayRegistr reason: ComponentsCreationSuccess status: "False" type: RolloutBlocked - configEditorCredentialsSecret: example-registry-quay-config-editor-credentials-hg7gg7h57m - configEditorEndpoint: https://example-registry-quay-config-editor-quay-enterprise.apps.docs.quayteam.org currentVersion: {producty} lastUpdated: 2021-09-14 10:52:46.104181633 +0000 UTC registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org unhealthyComponents: {} ----- +---- \ No newline at end of file diff --git a/modules/operator-preconfig-storage.adoc b/modules/operator-preconfig-storage.adoc index 67c668fdf..2a5322e21 100644 --- a/modules/operator-preconfig-storage.adoc +++ b/modules/operator-preconfig-storage.adoc @@ -1,8 +1,9 @@ -[[operator-storage-preconfig]] +:_content-type: REFERENCE +[id="operator-storage-preconfig"] = Configuring object storage -You need 
to configure object storage before installing {productname}, irrespective of whether you are allowing the Operator to manage the storage or managing it yourself. +You need to configure object storage before installing {productname}, irrespective of whether you are allowing the {productname} Operator to manage the storage or managing it yourself. -If you want the Operator to be responsible for managing storage, see the section on xref:operator-managed-storage[Managed storage] for information on installing and configuring the NooBaa / RHOCS Operator. +If you want the {productname} Operator to be responsible for managing storage, see the section on xref:operator-managed-storage[Managed storage] for information on installing and configuring NooBaa and the Red Hat OpenShift Data Foundations Operator. If you are using a separate storage solution, set `objectstorage` as `unmanaged` when configuring the Operator. See the following section. xref:operator-unmanaged-storage[Unmanaged storage], for details of configuring existing storage. diff --git a/modules/operator-preconfig-tls-routes.adoc b/modules/operator-preconfig-tls-routes.adoc index a7d3f0413..ba8c97e1e 100644 --- a/modules/operator-preconfig-tls-routes.adoc +++ b/modules/operator-preconfig-tls-routes.adoc @@ -1,14 +1,23 @@ -[[operator-preconfig-tls-routes]] -= Configuring TLS and routes +:_content-type: REFERENCE +[id="operator-preconfig-tls-routes"] += Configuring SSL/TLS and Routes -Support for OpenShift Container Platform Edge-Termination Routes has been added by way of a new managed component, `tls`. This separates the `route` component from TLS and allows users to configure both separately. `EXTERNAL_TLS_TERMINATION: true` is the opinionated setting. Managed `tls` means that the default cluster wildcard cert is used. Unmanaged `tls` means that the user provided cert/key pair will be injected into the `Route`. +Support for {ocp} _edge termination_ routes has been added by way of a new managed component, `tls`. 
This separates the `route` component from SSL/TLS and allows users to configure both separately. -`ssl.cert` and `ssl.key` are now moved to a separate, persistent Secret, which ensures that the cert/key pair is not re-generated upon every reconcile. These are now formatted as `edge` routes and mounted to the same directory in the Quay container. +`EXTERNAL_TLS_TERMINATION: true` is the opinionated setting. -Multiple permutations are possible when configuring TLS and Routes, but the following rules apply: +[NOTE] +==== +* Managed `tls` means that the default cluster wildcard certificate is used. +* Unmanaged `tls` means that the user provided key and certificate pair is injected into the route. +==== + +The `ssl.cert` and `ssl.key` are now moved to a separate, persistent secret, which ensures that the key and certificate pair are not regenerated upon every reconcile. The key and certificate pair are now formatted as `edge` routes and mounted to the same directory in the `Quay` container. -* If TLS is `managed`, then route must also be `managed` -* If TLS is `unmanaged` then you must supply certs, either with the config tool or directly in the config bundle +Multiple permutations are possible when configuring SSL/TLS and routes, but the following rules apply: + +* If SSL/TLS is `managed`, then your route must also be `managed`. +* If SSL/TLS is `unmanaged` then you must supply certificates directly in the config bundle. //* However, it is possible to have both TLS and route `unmanaged` and not supply certs. 
The following table describes the valid options: @@ -17,22 +26,22 @@ The following table describes the valid options: [width="100%",cols="2,2,2,2,3"options="header"] |=== |Option | Route | TLS | Certs provided |Result -| My own load balancer handles TLS | Managed | Managed | No |Edge Route with default wildcard cert +| My own load balancer handles TLS | Managed | Managed | No |Edge route with default wildcard cert | {productname} handles TLS | Managed | Unmanaged | Yes | Passthrough route with certs mounted inside the pod -| {productname} handles TLS | Unmanaged | Unmanaged | Yes | Certificates are set inside the quay pod but route must be created manually +| {productname} handles TLS | Unmanaged | Unmanaged | Yes | Certificates are set inside of the `quay` pod, but the route must be created manually // | None (Not for production) | Unmanaged | Unmanaged | No | Sets a passthrough route, allows HTTP traffic directly from the route and into the Pod |=== -[NOTE] -==== -{productname} 3.7 does not support builders when TLS is managed by the Operator. -==== +[id="creating-config-bundle-secret-tls-cert-key-pair"] +== Creating the config bundle secret with the SSL/TLS cert and key pair -== Creating the config bundle secret with TLS cert, key pair: +Use the following procedure to create a config bundle secret that includes your own SSL/TLS certificate and key pair. 
-To add your own TLS cert and key, include them in the config bundle secret as follows: +.Procedure -[source,bash] +* Enter the following command to create config bundle secret that includes your own SSL/TLS certificate and key pair: ++ +[source,terminal] ---- $ oc create secret generic --from-file config.yaml=./config.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret ---- diff --git a/modules/operator-preconfigure.adoc b/modules/operator-preconfigure.adoc index 799fb5464..a9f0c9070 100644 --- a/modules/operator-preconfigure.adoc +++ b/modules/operator-preconfigure.adoc @@ -1,19 +1,66 @@ -[[operator-preconfigure]] -= Configuring Quay before deployment +:_content-type: PROCEDURE +[id="operator-preconfigure"] += Configuring {productname} before deployment -The Operator can manage all the {productname} components when deploying on OpenShift, and this is the default configuration. Alternatively, you can manage one or more components externally yourself, where you want more control over the set up, and then allow the Operator to manage the remaining components. +The {productname} Operator can manage all of the {productname} components when deployed on {ocp}. This is the default configuration, however, you can manage one or more components externally when you want more control over the set up. -The standard pattern for configuring unmanaged components is: +Use the following pattern to configure unmanaged {productname} components. -. Create a `config.yaml` configuration file with the appropriate settings -. Create a Secret using the configuration file +.Procedure + +. Create a `config.yaml` configuration file with the appropriate settings. 
Use the following reference for a minimal configuration: ++ +[source,terminal] +---- +$ touch config.yaml +---- ++ +[source,yaml] +---- +AUTHENTICATION_TYPE: Database +BUILDLOGS_REDIS: + host: + password: + port: 6379 + ssl: false +DATABASE_SECRET_KEY: <0ce4f796-c295-415b-bf9d-b315114704b8> +DB_URI: +DEFAULT_TAG_EXPIRATION: 2w +DISTRIBUTED_STORAGE_CONFIG: + default: + - LocalStorage + - storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +PREFERRED_URL_SCHEME: http +SECRET_KEY: +SERVER_HOSTNAME: +SETUP_COMPLETE: true +TAG_EXPIRATION_OPTIONS: + - 0s + - 1d + - 1w + - 2w + - 4w + - 3y +USER_EVENTS_REDIS: + host: + port: 6379 + ssl: false +---- + +. Create a `Secret` using the configuration file by entering the following command: + +[source,terminal] ---- $ oc create secret generic --from-file config.yaml=./config.yaml config-bundle-secret ---- -. Create a QuayRegistry YAML file `quayregistry.yaml`, identifying the unmanaged components and also referencing the created Secret, for example: + +. Create a `quayregistry.yaml` file, identifying the unmanaged components and also referencing the created `Secret`, for example: ++ +.Example `QuayRegistry` YAML file + -.quayregistry.yaml [source,yaml] ---- apiVersion: quay.redhat.com/v1 @@ -22,13 +69,16 @@ metadata: name: example-registry namespace: quay-enterprise spec: - configBundleSecret: config-bundle-secret + configBundleSecret: components: - kind: objectstorage managed: false +# ... ---- -. Deploy the registry using the YAML file: + +. 
Enter the following command to deploy the registry by using the `quayregistry.yaml` file: + +[source,terminal] ---- $ oc create -n quay-enterprise -f quayregistry.yaml ---- diff --git a/modules/operator-prereq.adoc b/modules/operator-prereq.adoc index 05a6b495e..7599a73db 100644 --- a/modules/operator-prereq.adoc +++ b/modules/operator-prereq.adoc @@ -1,30 +1,45 @@ -[[operator-prereq]] -= Prerequisites for {productname} on OpenShift +:_content-type: REFERENCE +[id="operator-prereq"] += Prerequisites for {productname} on {ocp} -Before you begin the deployment of {productname} Operator on OpenShift, you should consider the following. +Consider the following prerequisites prior to deploying {productname} on {ocp} using the {productname} Operator. -== OpenShift cluster +[id="openshift-cluster"] +== {ocp} cluster -You need a privileged account to an OpenShift 4.5 or later cluster on which to deploy the {productname} Operator. That account must have the ability to create namespaces at the cluster scope. +To deploy the {productname} Operator, you must have an {ocp} 4.5 or later cluster and access to an administrative account. The administrative account must have the ability to create namespaces at the cluster scope. +[id="resource-requirements"] == Resource Requirements Each {productname} application pod has the following resource requirements: -* 8Gi of memory -* 2000 millicores of CPU. +* 8 Gi of memory +* 2000 millicores of CPU -The {productname} Operator will create at least one application pod per {productname} deployment it manages. Ensure your OpenShift cluster has sufficient compute resources for these requirements. +The {productname} Operator creates at least one application pod per {productname} deployment it manages. Ensure your {ocp} cluster has sufficient compute resources for these requirements. +[id="object-storage"] == Object Storage -By default, the {productname} Operator uses the `ObjectBucketClaim` Kubernetes API to provision object storage. 
Consuming this API decouples the Operator from any vendor-specific implementation. Red Hat OpenShift Data Foundation provides this API via its NooBaa component, which will be used in this example. +By default, the {productname} Operator uses the `ObjectBucketClaim` Kubernetes API to provision object storage. Consuming this API decouples the {productname} Operator from any vendor-specific implementation. {odf} provides this API through its NooBaa component, which is used as an example throughout this documentation. -{productname} can be manually configured to use any of the following supported cloud storage options: +{productname} can be manually configured to use multiple storage cloud providers, including the following: * Amazon S3 (see link:https://access.redhat.com/solutions/3680151[S3 IAM Bucket Policy] for details on configuring an S3 bucket policy for {productname}) -* Azure Blob Storage +* Microsoft Azure Blob Storage * Google Cloud Storage * Ceph Object Gateway (RADOS) * OpenStack Swift * CloudFront + S3 + +For a complete list of object storage providers, see the link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x support matrix]. + +[id="storage-class"] +== StorageClass + +When deploying `Quay` and `Clair` PostgreSQL databases using the {productname} Operator, a default `StorageClass` is configured in your cluster. + +The default `StorageClass` used by the {productname} Operator provisions the Persistent Volume Claims required by the `Quay` and `Clair` databases. These PVCs are used to store data persistently, ensuring that your {productname} registry and Clair vulnerability scanner remain available and maintain their state across restarts or failures. + +Before proceeding with the installation, verify that a default `StorageClass` is configured in your cluster to ensure seamless provisioning of storage for `Quay` and `Clair` components. 
\ No newline at end of file diff --git a/modules/operator-quayregistry-api.adoc b/modules/operator-quayregistry-api.adoc index 91070debd..0efa66e76 100644 --- a/modules/operator-quayregistry-api.adoc +++ b/modules/operator-quayregistry-api.adoc @@ -1,10 +1,11 @@ -[[operator-quayregistry-api]] +:_content-type: CONCEPT +[id="operator-quayregistry-api"] = QuayRegistry API -The Quay Operator provides the `QuayRegistry` custom resource API to declaratively manage `Quay` container registries on the cluster. Use either the OpenShift UI or a command-line tool to interact with this API. +The {productname} Operator provides the `QuayRegistry` custom resource API to declaratively manage `Quay` container registries on the cluster. Use either the {ocp} UI or a command-line tool to interact with this API. -* Creating a `QuayRegistry` will result in the Operator deploying and configuring all necessary resources needed to run Quay on the cluster. -* Editing a `QuayRegistry` will result in the Operator reconciling the changes and creating/updating/deleting objects to match the desired configuration. -* Deleting a `QuayRegistry` will result in garbage collection of all previously created resources and the `Quay` container registry will no longer be available. +* Creating a `QuayRegistry` results in the {productname} Operator deploying and configuring all necessary resources needed to run {productname} on the cluster. +* Editing a `QuayRegistry` results in the {productname} Operator reconciling the changes and creating, updating, and deleting objects to match the desired configuration. +* Deleting a `QuayRegistry` results in garbage collection of all previously created resources. After deletion, the `Quay` container registry is no longer available. -The `QuayRegistry` API is fairly simple, and the fields are outlined in the following sections. \ No newline at end of file +`QuayRegistry` API fields are outlined in the following sections. 
\ No newline at end of file diff --git a/modules/operator-quayregistry-status.adoc b/modules/operator-quayregistry-status.adoc index 1382e40eb..02f4cb5f4 100644 --- a/modules/operator-quayregistry-status.adoc +++ b/modules/operator-quayregistry-status.adoc @@ -1,24 +1,20 @@ -[[operator-quayregistry-status]] -= QuayRegistry Status +:_content-type: REFERENCE +[id="operator-quayregistry-status"] += Viewing the status of the QuayRegistry object -Lifecycle observability for a given Quay deployment is reported in the `status` section of the corresponding `QuayRegistry` object. The Operator constantly updates this section, and this should be the first place to look for any problems or state changes in Quay or its managed dependencies. +Lifecycle observability for a given {productname} deployment is reported in the `status` section of the corresponding `QuayRegistry` object. The {productname} Operator constantly updates this section, and this should be the first place to look for any problems or state changes in {productname} or its managed dependencies. -== Registry Endpoint +[id="quayregistry-endpoint"] +== Viewing the registry endpoint -Once Quay is ready to be used, the `status.registryEndpoint` field will be populated with the publicly available hostname of the registry. +Once {productname} is ready to be used, the `status.registryEndpoint` field will be populated with the publicly available hostname of the registry. -== Config Editor Endpoint +[id="quayregistry-current-version"] +== Viewing the version of {productname} in use -Access Quay's UI-based config editor using `status.configEditorEndpoint`. +The current version of {productname} that is running will be reported in `status.currentVersion`. -== Config Editor Credentials Secret - -The username/password for the config editor UI will be stored in a `Secret` in the same namespace as the `QuayRegistry` referenced by `status.configEditorCredentialsSecret`. 
- -== Current Version - -The current version of Quay that is running will be reported in `status.currentVersion`. - -== Conditions +[id="quayregistry-conditions"] +== Viewing the conditions of your {productname} deployment Certain conditions will be reported in `status.conditions`. \ No newline at end of file diff --git a/modules/operator-resize-storage.adoc b/modules/operator-resize-storage.adoc index b4269b282..b99d57d92 100644 --- a/modules/operator-resize-storage.adoc +++ b/modules/operator-resize-storage.adoc @@ -1,32 +1,42 @@ -[[operator-resize-storage]] +:_content-type: PROCEDURE +[id="operator-resize-storage"] = Resizing Managed Storage -The Quay Operator creates default object storage using the defaults provided by RHOCS when creating a `NooBaa` object (50 Gib). There are two ways to extend this storage; you can resize an existing PVC or add more PVCs to a new storage pool. +When deploying {productname-ocp}, three distinct persistent volume claims (PVCs) are deployed: -== Resize Noobaa PVC +* One for the PostgreSQL 13 registry. +* One for the Clair PostgreSQL 13 registry. +* One that uses NooBaa as a backend storage. -. Log into the OpenShift console and select `Storage` -> `Persistent Volume Claims`. -. Select the `PersistentVolumeClaim` named like `noobaa-default-backing-store-noobaa-pvc-*`. -. From the Action menu, select `Expand PVC`. -. Enter the new size of the Persistent Volume Claim and select `Expand`. +[NOTE] +==== +The connection between {productname} and NooBaa is done through the S3 API and ObjectBucketClaim API in {ocp}. {productname} leverages that API group to create a bucket in NooBaa, obtain access keys, and automatically set everything up. On the backend, or NooBaa, side, that bucket is created inside of the backing store. As a result, NooBaa PVCs are not mounted or connected to {productname} pods. +==== -After a few minutes (depending on the size of the PVC), the expanded size should reflect in the PVC's `Capacity` field. 
+The default size for the PostgreSQL 13 and Clair PostgreSQL 13 PVCs is set to 50 GiB. You can expand storage for these PVCs on the {ocp} console by using the following procedure. [NOTE] ==== -Expanding CSI volumes is a Technology Preview feature only. For more information, see link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.6/html/storage/expanding-persistent-volumes[]. +The following procedure shares commonality with link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.5/html/managing_openshift_container_storage/managing-persistent-volume-claims_rhocs#expanding-persistent-volume-claims_rhocs[Expanding Persistent Volume Claims] on {odf}. ==== -== Add Another Storage Pool +[id="resizing-noobaa-pvc"] +== Resizing PostgreSQL 13 PVCs on {productname} + +Use the following procedure to resize the PostgreSQL 13 and Clair PostgreSQL 13 PVCs. + +.Prerequisites + +* You have cluster admin privileges on {ocp}. + +.Procedure + +. Log into the {ocp} console and select *Storage* -> *Persistent Volume Claims*. -. Log into the OpenShift console and select `Networking` -> `Routes`. Make sure the `openshift-storage` project is selected. -. Click on the `Location` field for the `noobaa-mgmt` Route. -. Log into the Noobaa Management Console. -. On the main dashboard, under `Storage Resources`, select `Add Storage Resources`. -. Select `Deploy Kubernetes Pool` -. Enter a new pool name. Click `Next`. -. Choose the number of Pods to manage the pool and set the size per node. Click `Next`. -. Click `Deploy`. +. Select the desired `PersistentVolumeClaim` for either PostgreSQL 13 or Clair PostgreSQL 13, for example, `example-registry-quay-postgres-13`. -After a few minutes, the additional storage pool will be added to the Noobaa resources and available for use by {productname}. +. From the *Action* menu, select *Expand PVC*. +. Enter the new size of the Persistent Volume Claim and select *Expand*. 
++ +After a few minutes, the expanded size should reflect in the PVC's *Capacity* field. \ No newline at end of file diff --git a/modules/operator-standalone-object-gateway.adoc b/modules/operator-standalone-object-gateway.adoc index 18f6c6c50..60f716df9 100644 --- a/modules/operator-standalone-object-gateway.adoc +++ b/modules/operator-standalone-object-gateway.adoc @@ -1,27 +1,217 @@ -[[operator-standalone-object-gateway]] -= About The Standalone Object Gateway +:_content-type: PROCEDURE +[id="operator-standalone-object-gateway"] += Leveraging the Multicloud Object Gateway Component in the {odf} Operator for {productname} -As part of a {productname} subscription, users are entitled to use the _Multi-Cloud Object Gateway_ (MCG) component of the Red Hat OpenShift Data Foundation Operator (formerly known as OpenShift Container Storage Operator). This gateway component allows you to provide an S3-compatible object storage interface to Quay backed by Kubernetes `PersistentVolume`-based block storage. The usage is limited to a Quay deployment managed by the Operator and to the exact specifications of the MCG instance as documented below. +As part of a {productname} subscription, users are entitled to use the _Multicloud Object Gateway_ component of the {odf} Operator (formerly known as OpenShift Container Storage Operator). This gateway component allows you to provide an S3-compatible object storage interface to {productname} backed by Kubernetes `PersistentVolume`-based block storage. The usage is limited to a {productname} deployment managed by the Operator and to the exact specifications of the multicloud Object Gateway instance as documented below. Since {productname} does not support local filesystem storage, users can leverage the gateway in combination with Kubernetes `PersistentVolume` storage instead, to provide a supported deployment. 
A `PersistentVolume` is directly mounted on the gateway instance as a backing store for object storage and any block-based `StorageClass` is supported. -By the nature of `PersistentVolume`, this is not a scale-out, highly available solution and does not replace a scale-out storage system like Red Hat OpenShift Data Foundation (ODF). Only a single instance of the gateway is running. If the pod running the gateway becomes unavailable due to rescheduling, updates or unplanned downtime, this will cause temporary degradation of the connected Quay instances. +By the nature of `PersistentVolume`, this is not a scale-out, highly available solution and does not replace a scale-out storage system like {odf}. Only a single instance of the gateway is running. If the pod running the gateway becomes unavailable due to rescheduling, updates or unplanned downtime, this will cause temporary degradation of the connected {productname} instances. -== Create A Standalone Object Gateway +Using the following procedures, you will install the Local Storage Operator, {odf}, and create a standalone Multicloud Object Gateway to deploy {productname} on {ocp}. -To install the ODF (formerly known as OpenShift Container Storage) Operator and configure a single instance Multi-Cloud Gateway service, follow these steps: +[NOTE] +==== +The following documentation shares commonality with the official link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_bare_metal_infrastructure/deploy-standalone-multicloud-object-gateway#doc-wrapper[{odf} documentation]. +==== -. Open the OpenShift console and select Operators -> OperatorHub, then select the OpenShift Data Foundation Operator. -. Select Install. Accept all default options and select Install again. -. Within a minute, the Operator will install and create a namespace `openshift-storage`. You can confirm it has completed when the `Status` column is marked `Succeeded`. 
+[id="installing-local-storage-operator"] +== Installing the Local Storage Operator on {ocp} + +Use the following procedure to install the Local Storage Operator from the *OperatorHub* before creating {odf} clusters on local storage devices. + +. Log in to the *OpenShift Web Console*. + +. Click *Operators* → *OperatorHub*. + +. Type *local storage* into the search box to find the Local Storage Operator from the list of Operators. Click *Local Storage*. + +. Click *Install*. + +. Set the following options on the Install Operator page: ++ +* For Update channel, select *stable*. +* For Installation mode, select *A specific namespace on the cluster*. +* For Installed Namespace, select *Operator recommended namespace openshift-local-storage*. +* For Update approval, select *Automatic*. + +. Click *Install*. + +[id="installing-odf"] +== Installing {odf} on {ocp} + +Use the following procedure to install {odf} on {ocp}. + +.Prerequisites + +* Access to an {ocp} cluster using an account with `cluster-admin` and Operator installation permissions. +* You must have at least three worker nodes in the {ocp} cluster. +* For additional resource requirements, see the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html-single/planning_your_deployment/index[Planning your deployment] guide. + +.Procedure + +. Log in to the *OpenShift Web Console*. + +. Click *Operators* → *OperatorHub*. + +. Type *OpenShift Data Foundation* in the search box. Click *OpenShift Data Foundation*. + +. Click *Install*. + +. Set the following options on the Install Operator page: ++ +* For Update channel, select the most recent stable version. +* For Installation mode, select *A specific namespace on the cluster*. +* For Installed Namespace, select *Operator recommended Namespace: openshift-storage*. +* For Update approval, select *Automatic* or *Manual*. 
++ +If you select *Automatic* updates, then the Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without any intervention. ++ +If you select *Manual* updates, then the OLM creates an update request. As a cluster administrator, you must then manually approve that update request to update the Operator to a newer version. + +* For Console plugin, select *Enable*. + +. Click *Install*. ++ +After the Operator is installed, a pop-up with a message, `Web console update is available` appears on the user interface. Click *Refresh web console* from this pop-up for the console changes to reflect. + +. Continue to the following section, "Creating a standalone Multicloud Object Gateway", to leverage the Multicloud Object Gateway Component for {productname}. + +[id="creating-mcg"] +== Creating a standalone Multicloud Object Gateway using the {ocp} UI + +Use the following procedure to create a standalone Multicloud Object Gateway. + +.Prerequisites + +* You have installed the Local Storage Operator. +* You have installed the {odf} Operator. + +.Procedure + +. In the *OpenShift Web Console*, click *Operators* -> *Installed Operators* to view all installed Operators. ++ +Ensure that the namespace is `openshift-storage`. + +. Click *Create StorageSystem*. + +. On the *Backing storage* page, select the following: +.. Select *Multicloud Object Gateway* for *Deployment type*. +.. Select the *Create a new StorageClass using the local storage devices* option. +.. Click *Next*. ++ +[NOTE] +==== +You are prompted to install the Local Storage Operator if it is not already installed. Click *Install*, and follow the procedure as described in "Installing the Local Storage Operator on {ocp}". +==== + +. On the *Create local volume set* page, provide the following information: +.. Enter a name for the *LocalVolumeSet* and the *StorageClass*. By default, the local volume set name appears for the storage class name. You can change the name. +.. 
Choose one of the following: ++ +* *Disk on all nodes* ++ +Uses the available disks that match the selected filters on all the nodes. ++ +* *Disk on selected nodes* ++ +Uses the available disks that match the selected filters only on the selected nodes. + +.. From the available list of *Disk Type*, select *SSD/NVMe*. + +.. Expand the *Advanced* section and set the following options: ++ +|=== +|*Volume Mode* | Filesystem is selected by default. Always ensure that Filesystem is selected for Volume Mode. +|*Device Type* | Select one or more device type from the dropdown list. +|*Disk Size*| Set a minimum size of 100GB for the device and maximum available size of the device that needs to be included. +|*Maximum Disks Limit* | This indicates the maximum number of PVs that can be created on a node. If this field is left empty, then PVs are created for all the available disks on the matching nodes. +|=== + +.. Click *Next* ++ +A pop-up to confirm the creation of `LocalVolumeSet` is displayed. + +.. Click *Yes* to continue. + +. In the *Capacity and nodes* page, configure the following: ++ +.. *Available raw capacity* is populated with the capacity value based on all the attached disks associated with the storage class. This takes some time to show up. The *Selected nodes* list shows the nodes based on the storage class. +.. Click *Next* to continue. + +. Optional. Select the *Connect to an external key management service* checkbox. This is optional for cluster-wide encryption. +.. From the *Key Management Service Provider* drop-down list, either select *Vault* or *Thales CipherTrust Manager (using KMIP)*. If you selected *Vault*, go to the next step. If you selected *Thales CipherTrust Manager (using KMIP)*, go to step iii. +.. Select an *Authentication Method*. ++ +Using Token Authentication method ++ +* Enter a unique *Connection Name*, host *Address* of the Vault server ('https://'), *Port* number and *Token*. 
++ +* Expand *Advanced Settings* to enter additional settings and certificate details based on your `Vault` configuration: ++ +** Enter the Key Value secret path in *Backend Path* that is dedicated and unique to OpenShift Data Foundation. +** Optional: Enter *TLS Server Name* and *Vault Enterprise Namespace*. +** Upload the respective PEM encoded certificate file to provide the *CA Certificate*, *Client Certificate,* and *Client Private Key*. +** Click *Save* and skip to step iv. ++ +Using Kubernetes authentication method ++ +* Enter a unique Vault *Connection Name*, host *Address* of the Vault server ('https://'), *Port* number and *Role* name. +* Expand *Advanced Settings* to enter additional settings and certificate details based on your Vault configuration: +** Enter the Key Value secret path in *Backend Path* that is dedicated and unique to {odf}. +** Optional: Enter *TLS Server Name* and *Authentication Path* if applicable. +** Upload the respective PEM encoded certificate file to provide the *CA Certificate*, *Client Certificate*, and *Client Private Key*. +** Click *Save* and skip to step iv. + +.. To use *Thales CipherTrust Manager (using KMIP)* as the KMS provider, follow the steps below: + +... Enter a unique *Connection Name* for the Key Management service within the project. +... In the *Address* and *Port* sections, enter the IP of Thales CipherTrust Manager and the port where the KMIP interface is enabled. For example: ++ +* *Address*: 123.34.3.2 +* *Port*: 5696 +... Upload the *Client Certificate*, *CA certificate*, and *Client Private Key*. +... If StorageClass encryption is enabled, enter the Unique Identifier to be used for encryption and decryption generated above. +... The *TLS Server* field is optional and used when there is no DNS entry for the KMIP endpoint. For example,`kmip_all_.ciphertrustmanager.local`. + +.. Select a *Network*. +.. Click *Next*. + +. In the *Review and create* page, review the configuration details. 
To modify any configuration settings, click *Back*. + +. Click *Create StorageSystem*. + + +[id="creating-standalone-object-gateway"] +== Creating a standalone Multicloud Object Gateway using the CLI + +Use the following procedure to install the {odf} (formerly known as OpenShift Container Storage) Operator and configure a single instance Multi-Cloud Gateway service. + +[NOTE] +==== +The following configuration cannot be run in parallel on a cluster with {odf} installed. +==== + +.Procedure + +. Log in to the *OpenShift Web Console*, and then select *Operators* -> *OperatorHub*. + +. Search for *{odf}*, and then select *Install*. + +. Accept all default options, and then select *Install*. + +. Confirm that the Operator has installed by viewing the *Status* column, which should be marked as *Succeeded*. + [WARNING] ----- -When the installation of the ODF Operator is complete, you are prompted to create a storage system. Do not follow this instruction. Instead, create NooBaa object storage as outlined the following steps. ----- -. Create NooBaa object storage. Save the following YAML to a file called `noobaa.yaml`. +==== +When the installation of the {odf} Operator is finished, you are prompted to create a storage system. Do not follow this instruction. Instead, create NooBaa object storage as outlined in the following steps. +==== + +. On your machine, create a file named `noobaa.yaml` with the following information: ++ +[source,yaml] + -``` +---- apiVersion: noobaa.io/v1alpha1 kind: NooBaa metadata: @@ -37,27 +227,41 @@ spec: requests: cpu: '0.1' memory: 1Gi -``` +---- + -This will create a single instance deployment of the _Multi-cloud Object Gateway_. +This creates a single instance deployment of the _Multi-cloud Object Gateway_. + . Apply the configuration with the following command: + -``` +[source,terminal] +---- $ oc create -n openshift-storage -f noobaa.yaml -noobaa.noobaa.io/noobaa created -``` +---- + -. 
After a couple of minutes, you should see that the MCG instance has finished provisioning (`PHASE` column will be set to `Ready`): +.Example output + -``` +[source,terminal] +---- +noobaa.noobaa.io/noobaa created +---- + +. After a few minutes, the _Multi-cloud Object Gateway_ should finish provisioning. You can enter the following command to check its status: ++ +[source,terminal] +---- $ oc get -n openshift-storage noobaas noobaa -w +---- ++ +.Example output ++ +[source,terminal] +---- NAME MGMT-ENDPOINTS S3-ENDPOINTS IMAGE PHASE AGE noobaa [https://10.0.32.3:30318] [https://10.0.32.3:31958] registry.redhat.io/ocs4/mcg-core-rhel8@sha256:56624aa7dd4ca178c1887343c7445a9425a841600b1309f6deace37ce6b8678d Ready 3d18h -``` +---- -. Next, configure a backing store for the gateway. Save the following YAML to a file called `noobaa-pv-backing-store.yaml`. +. Configure a backing store for the gateway by creating the following YAML file, named `noobaa-pv-backing-store.yaml`: + -.noobaa-pv-backing-store.yaml [source,yaml] ---- apiVersion: noobaa.io/v1alpha1 @@ -78,22 +282,28 @@ spec: storageClass: STORAGE-CLASS-NAME <2> type: pv-pool ---- -<1> The overall capacity of the object storage service, adjust as needed -<2> The `StorageClass` to use for the `PersistentVolumes` requested, delete this property to use the cluster default +<1> The overall capacity of the object storage service. Adjust as needed. +<2> The `StorageClass` to use for the `PersistentVolumes` requested. Delete this property to use the cluster default. -. Apply the configuration with the following command: +. Enter the following command to apply the configuration: + -``` +[source,terminal] +---- $ oc create -f noobaa-pv-backing-store.yaml +---- ++ +.Example output ++ +[source,terminal] +---- backingstore.noobaa.io/noobaa-pv-backing-store created -``` +---- + -This creates the backing store configuration for the gateway. 
All images in Quay will be stored as objects through the gateway in a `PersistentVolume` created by the above configuration. +This creates the backing store configuration for the gateway. All images in {productname} will be stored as objects through the gateway in a `PersistentVolume` created by the above configuration. -. Finally, run the following command to make the `PersistentVolume` backing store the default for all `ObjectBucketClaims` issued by the Operator. +. Run the following command to make the `PersistentVolume` backing store the default for all `ObjectBucketClaims` issued by the {productname} Operator: + -``` +[source,terminal] +---- $ oc patch bucketclass noobaa-default-bucket-class --patch '{"spec":{"placementPolicy":{"tiers":[{"backingStores":["noobaa-pv-backing-store"]}]}}}' --type merge -n openshift-storage -``` - -This concludes the setup of the _Multi-Cloud Object Gateway_ instance for {productname}. Note that this configuration cannot be run in parallel on a cluster with Red Hat OpenShift Data Foundation installed. +---- diff --git a/modules/operator-unmanaged-hpa.adoc b/modules/operator-unmanaged-hpa.adoc index e20035799..5cc93f773 100644 --- a/modules/operator-unmanaged-hpa.adoc +++ b/modules/operator-unmanaged-hpa.adoc @@ -1,21 +1,120 @@ -[[operator-unmanaged-hpa]] -= Disabling the Horizontal Pod Autoscaler +:_content-type: REFERENCE +[id="operator-unmanaged-hpa"] += Using unmanaged Horizontal Pod Autoscalers -`HorizontalPodAutoscalers` have been added to the Clair, Quay, and Mirror pods, so that they now automatically scale during load spikes. +Horizontal Pod Autoscalers (HPAs) are now included with the `Clair`, `Quay`, and `Mirror` pods, so that they now automatically scale during load spikes. -As HPA is configured by default to be `managed`, the number of pods for Quay, Clair and repository mirroring is set to two. This facilitates the avoidance of downtime when updating / reconfiguring Quay via the Operator or during rescheduling events. 
+As HPA is configured by default to be managed, the number of `Clair`, `Quay`, and `Mirror` pods is set to two. This facilitates the avoidance of downtime when updating or reconfiguring {productname} through the Operator or during rescheduling events. -If you wish to disable autoscaling or create your own `HorizontalPodAutoscaler`, simply specify the component as unmanaged in the `QuayRegistry` instance: +[NOTE] +==== +There is a known issue when disabling the `HorizontalPodAutoscaler` component and attempting to edit the HPA resource itself and increase the value of the `minReplicas` field. When attempting this setup, `Quay` application pods are scaled out by the unmanaged HPA and, after 60 seconds, the replica count is reconciled by the {productname} Operator. As a result, HPA pods are continuously created and then removed by the Operator. +To resolve this issue, you should upgrade your {productname} deployment to at least version 3.12.5 or 3.13.1 and then use the following example to avoid the issue. + +This issue will be fixed in a future version of {productname}. For more information, see link:https://issues.redhat.com/browse/PROJQUAY-6474[PROJQUAY-6474]. +==== + +[id="operator-disabling-hpa"] +== Disabling the Horizontal Pod Autoscaler + +To disable autoscaling or create your own `HorizontalPodAutoscaler` component, specify the component as `unmanaged` in the `QuayRegistry` custom resource definition. To avoid the known issue noted above, you must modify the `QuayRegistry` CRD object and set the replicas equal to `null` for the `quay`, `clair`, and `mirror` components. 
+ +.Procedure + +* Edit the `QuayRegistry` CRD to include the following `replicas: null` for the `quay` component: ++ +[source,terminal] +---- +$ oc edit quayregistry -n +---- ++ [source,yaml] ---- apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: - name: example-registry +name: quay-registry +namespace: quay-enterprise +spec: +components: + - kind: horizontalpodautoscaler + managed: false + - kind: quay + managed: true + overrides: + replicas: null <1> + - kind: clair + managed: true + overrides: + replicas: null + - kind: mirror + managed: true + overrides: + replicas: null +# ... +---- +<1> After setting `replicas: null` in your `QuayRegistry` CRD, a new replica set might be generated because the deployment manifest of the `Quay` app is changed with `replicas: 1`. + +.Verification + +. Create a customized `HorizontalPodAutoscalers` CRD and increase the `minReplicas` amount to a higher value, for example, `3`: ++ +[source,yaml] +---- +kind: HorizontalPodAutoscaler +apiVersion: autoscaling/v2 +metadata: + name: quay-registry-quay-app namespace: quay-enterprise spec: - components: - - kind: horizontalpodautoscaler - managed: false + scaleTargetRef: + kind: Deployment + name: quay-registry-quay-app + apiVersion: apps/v1 + minReplicas: 3 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 90 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 90 +---- + +. Ensure that your `QuayRegistry` application successfully starts by entering the following command: ++ +[source,terminal] +---- +$ oc get pod | grep quay-app +---- ++ +.Example output ++ +[source,terminal] +---- +quay-registry-quay-app-5b8fd49d6b-7wvbk 1/1 Running 0 34m +quay-registry-quay-app-5b8fd49d6b-jslq9 1/1 Running 0 3m42s +quay-registry-quay-app-5b8fd49d6b-pskpz 1/1 Running 0 43m +quay-registry-quay-app-upgrade-llctl 0/1 Completed 0 51m +---- + +. 
Ensure that your `HorizontalPodAutoscalers` successfully starts by entering the following command: ++ +[source,terminal] +---- +$ oc get hpa +---- ++ +[source,terminal] +---- +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +quay-registry-quay-app Deployment/quay-registry-quay-app 67%/90%, 54%/90% 3 20 3 51m ---- \ No newline at end of file diff --git a/modules/operator-unmanaged-mirroring.adoc b/modules/operator-unmanaged-mirroring.adoc index ac4339718..b05f8a7d5 100644 --- a/modules/operator-unmanaged-mirroring.adoc +++ b/modules/operator-unmanaged-mirroring.adoc @@ -1,8 +1,10 @@ -[[operator-unmanaged-mirroring]] -= Unmanaged mirroring +:_content-type: REFERENCE +[id="operator-unmanaged-mirroring"] += Disabling the mirroring component -To disable mirroring explicitly: +To disable mirroring, use the following YAML configuration: +.Unmanaged mirroring example YAML configuration [source,yaml] ---- apiVersion: quay.redhat.com/v1 diff --git a/modules/operator-unmanaged-monitoring.adoc b/modules/operator-unmanaged-monitoring.adoc index cb3e8b00a..4c09df6c4 100644 --- a/modules/operator-unmanaged-monitoring.adoc +++ b/modules/operator-unmanaged-monitoring.adoc @@ -1,10 +1,10 @@ -[[operator-unmanaged-monitoring]] -= Unmanaged monitoring +:_content-type: REFERENCE +[id="operator-unmanaged-monitoring"] += Disabling the monitoring component -If you install the Quay Operator in a single namespace, the monitoring component is automatically set to 'unmanaged'. To enable monitoring in this scenario, see the section xref:monitoring-single-namespace[]. - -To disable monitoring explicitly: +If you install the {productname} Operator in a single namespace, the monitoring component is automatically set to `managed: false`. Use the following reference to explicitly disable monitoring. 
+.Unmanaged monitoring [source,yaml] ---- apiVersion: quay.redhat.com/v1 @@ -16,4 +16,9 @@ spec: components: - kind: monitoring managed: false ----- \ No newline at end of file +---- + +[NOTE] +==== +Monitoring cannot be enabled when the {productname} Operator is installed in a single namespace. +==== \ No newline at end of file diff --git a/modules/operator-unmanaged-postgres.adoc b/modules/operator-unmanaged-postgres.adoc index 64fe54921..e5a559684 100644 --- a/modules/operator-unmanaged-postgres.adoc +++ b/modules/operator-unmanaged-postgres.adoc @@ -1,26 +1,36 @@ -[[operator-unmanaged-postgres]] -= Using an existing Postgres database +:_content-type: PROCEDURE +[id="operator-unmanaged-postgres"] += Using an existing PostgreSQL database -Requirements: +If you are using an externally managed PostgreSQL database, you must manually enable the `pg_trgm` extension for a successful deployment. -If you are using an externally managed PostgreSQL database, you must manually enable pg_trgm extension for a successful deployment. +[IMPORTANT] +==== +You must not use the same externally managed PostgreSQL database for both {productname} and Clair deployments. Your PostgreSQL database must also not be shared with other workloads, as it might exhaust the natural connection limit on the PostgreSQL side when connection-intensive workloads, like {productname} or Clair, contend for resources. Additionally, pgBouncer is not supported with {productname} or Clair, so it is not an option to resolve this issue. +==== -. Create a configuration file `config.yaml` with the necessary database fields: +Use the following procedure to deploy an existing PostgreSQL database. + +.Procedure + +. Create a `config.yaml` file with the necessary database fields. 
For example: ++ +.Example `config.yaml` file: + -.config.yaml: [source,yaml] ---- -DB_URI: postgresql://test-quay-database:postgres@test-quay-database:5432/test-quay-database +DB_URI: postgresql://test-quay-database:postgres@test-quay-database:5432/test-quay-database ---- -. Create a Secret using the configuration file: +. Create a `Secret` using the configuration file: + ---- $ kubectl create secret generic --from-file config.yaml=./config.yaml config-bundle-secret ---- + -. Create a QuayRegistry YAML file `quayregistry.yaml` which marks the `postgres` component as unmanaged and references the created Secret: +. Create a `QuayRegistry.yaml` file which marks the `postgres` component as `unmanaged` and references the created `Secret`. For example: ++ +.Example `quayregistry.yaml` file + -.quayregistry.yaml [source,yaml] ---- apiVersion: quay.redhat.com/v1 @@ -34,4 +44,7 @@ spec: - kind: postgres managed: false ---- -. Deploy the registry as detailed in the following sections. + +.Next steps + +* Continue to the following sections to deploy the registry. \ No newline at end of file diff --git a/modules/operator-unmanaged-redis.adoc b/modules/operator-unmanaged-redis.adoc index 115e8db32..6ab77c137 100644 --- a/modules/operator-unmanaged-redis.adoc +++ b/modules/operator-unmanaged-redis.adoc @@ -1,31 +1,35 @@ -[[operator-unmanaged-redis]] -= Using external Redis +[id="operator-unmanaged-redis"] += Using an unmanaged Redis database -If you wish to use an external Redis database, set the component as unmanaged in the `QuayRegistry` instance: +Use the following procedure to set up an external Redis database. +.Procedure -. Create a configuration file `config.yaml` with the necessary redis fields: +. Create a `config.yaml` file using the following Redis fields: + [source,yaml] ---- +# ... BUILDLOGS_REDIS: - host: quay-server.example.com + host: port: 6379 ssl: false - +# ... USER_EVENTS_REDIS: - host: quay-server.example.com + host: port: 6379 ssl: false +# ... ---- -. 
Create a Secret using the configuration file +. Enter the following command to create a secret using the configuration file: + -``` +[source,terminal] +---- $ oc create secret generic --from-file config.yaml=./config.yaml config-bundle-secret -``` +---- -. Create a QuayRegistry YAML file `quayregistry.yaml` which marks redis component as unmanaged and references the created Secret: +. Create a `quayregistry.yaml` file that sets the Redis component to `unmanaged` and references the created secret: + [source,yaml] ---- @@ -39,7 +43,7 @@ spec: components: - kind: redis managed: false +# ... ---- -. Deploy the registry - +. Deploy the {productname} registry. \ No newline at end of file diff --git a/modules/operator-unmanaged-route.adoc b/modules/operator-unmanaged-route.adoc index 14edf5bdb..f8d95379a 100644 --- a/modules/operator-unmanaged-route.adoc +++ b/modules/operator-unmanaged-route.adoc @@ -1,9 +1,12 @@ -[[operator-unmanaged-route]] -= Disabling Route Component +:_content-type: PROCEDURE +[id="operator-unmanaged-route"] += Disabling the Route component -To prevent the Operator from creating a `Route`: +Use the following procedure to prevent the {productname} Operator from creating a route. -. Mark the component as unmanaged in the `QuayRegistry`: +.Procedure + +. Set the component as `managed: false` in the `quayregistry.yaml` file: + [source,yaml] ---- @@ -18,21 +21,20 @@ spec: managed: false ---- -. Specify that you want Quay to handle TLS in the configuration, by editing the `config.yaml` file: +. Edit the `config.yaml` file to specify that {productname} handles SSL/TLS. For example: + -.config.yaml [source,yaml] ---- -... +# ... EXTERNAL_TLS_TERMINATION: false -... +# ... SERVER_HOSTNAME: example-registry-quay-quay-enterprise.apps.user1.example.com -... +# ... PREFERRED_URL_SCHEME: https -... +# ... 
---- + -If you do not configure the unmanaged Route correctly, you will see an error similar to the following: +If you do not configure the unmanaged route correctly, the following error is returned: + [source,json] ---- @@ -50,8 +52,7 @@ If you do not configure the unmanaged Route correctly, you will see an error sim } ---- - [NOTE] ==== -Disabling the default `Route` means you are now responsible for creating a `Route`, `Service`, or `Ingress` in order to access the Quay instance and that whatever DNS you use must match the `SERVER_HOSTNAME` in the Quay config. +Disabling the default route means you are now responsible for creating a `Route`, `Service`, or `Ingress` in order to access the {productname} instance. Additionally, whatever DNS you use must match the `SERVER_HOSTNAME` in the {productname} config. ==== diff --git a/modules/operator-unmanaged-storage-noobaa.adoc b/modules/operator-unmanaged-storage-noobaa.adoc index 4902feb6b..51755c4b6 100644 --- a/modules/operator-unmanaged-storage-noobaa.adoc +++ b/modules/operator-unmanaged-storage-noobaa.adoc @@ -1,9 +1,15 @@ -[[operator-unmanaged-storage-noobaa]] -= NooBaa unmanaged storage +[id="operator-unmanaged-storage-noobaa"] += Using an unmanaged NooBaa instance + +Use the following procedure to use an unmanaged NooBaa instance for your {productname} deployment. + +.Procedure . Create a NooBaa Object Bucket Claim in the console at Storage -> Object Bucket Claims. -. Retrieve the Object Bucket Claim Data details including the Access Key, Bucket Name, Endpoint (hostname) and Secret Key. -. Create a `config.yaml` configuration file, using the information for the Object Bucket Claim: + +. Retrieve the Object Bucket Claim Data details including the `Access Key`, `Bucket Name`, `Endpoint (hostname)`, and `Secret Key`. + +. Create a `config.yaml` configuration file using the information for the Object Bucket Claim. 
For example: + [source,yaml] ---- diff --git a/modules/operator-unmanaged-storage.adoc b/modules/operator-unmanaged-storage.adoc index 6ad6dd178..f092d101a 100644 --- a/modules/operator-unmanaged-storage.adoc +++ b/modules/operator-unmanaged-storage.adoc @@ -1,10 +1,14 @@ -[[operator-unmanaged-storage]] -= Unmanaged storage +:_content-type: REFERENCE +[id="operator-unmanaged-storage"] += Using unmanaged storage -Some configuration examples for unmanaged storage are provided in this section for convenience. See the {productname} configuration guide for full details for setting up object storage. +This section provides configuration examples for unmanaged storage for your convenience. Refer to the {productname} configuration guide for complete instructions on how to set up object storage. +[id="aws-s3-storage-example"] == AWS S3 storage +Use the following example when configuring AWS S3 storage for your {productname} deployment. + [source,yaml] ---- DISTRIBUTED_STORAGE_CONFIG: @@ -14,14 +18,78 @@ DISTRIBUTED_STORAGE_CONFIG: s3_access_key: ABCDEFGHIJKLMN s3_secret_key: OL3ABCDEFGHIJKLMN s3_bucket: quay_bucket + s3_region: storage_path: /datastorage/registry DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] DISTRIBUTED_STORAGE_PREFERENCE: - s3Storage ---- +[id="aws-cloudfront-storage-example"] +== AWS Cloudfront storage + +Use the following example when configuring AWS Cloudfront for your {productname} deployment. + +[NOTE] +==== +* When configuring AWS Cloudfront storage, the following conditions must be met for proper use with {productname}: +** You must set an *Origin path* that is consistent with {productname}'s storage path as defined in your `config.yaml` file. Failure to meet this requirement results in a `403` error when pulling an image. For more information, see link:https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginPath[Origin path]. 
+** You must configure a link:https://docs.aws.amazon.com/whitepapers/latest/secure-content-delivery-amazon-cloudfront/s3-origin-with-cloudfront.html[*Bucket policy*] and a link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/cors.html[*Cross-origin resource sharing (CORS)*] policy. +==== + +.Cloudfront S3 example YAML +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - CloudFrontedS3Storage + - cloudfront_distribution_domain: + cloudfront_key_id: + cloudfront_privatekey_filename: + host: + s3_access_key: + s3_bucket: + s3_secret_key: + storage_path: + s3_region: +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - default +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- + +.Bucket policy example +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " + }, + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " + }, + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} + +---- + +[id="gcp-storage-example"] == Google Cloud storage +Use the following example when configuring Google Cloud storage for your {productname} deployment. + [source,yaml] ---- DISTRIBUTED_STORAGE_CONFIG: @@ -31,12 +99,17 @@ DISTRIBUTED_STORAGE_CONFIG: bucket_name: quay-bucket secret_key: FhDAYe2HeuAKfvZCAGyOioNaaRABCDEFGHIJKLMN storage_path: /datastorage/registry + boto_timeout: 120 <1> DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] DISTRIBUTED_STORAGE_PREFERENCE: - googleCloudStorage ---- +<1> Optional. The time, in seconds, until a timeout exception is thrown when attempting to read from a connection. The default is `60` seconds. Also encompasses the time, in seconds, until a timeout exception is thrown when attempting to make a connection. The default is `60` seconds. 
+ +[id="azure-storage-example"] +== Microsoft Azure storage -== Azure storage +Use the following example when configuring Microsoft Azure storage for your {productname} deployment. [source,yaml] ---- @@ -53,11 +126,15 @@ DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] DISTRIBUTED_STORAGE_PREFERENCE: - azureStorage ---- -<1> The `endpoint_url` parameter for Azure storage is optional and can be used with Microsoft Azure Government (MAG) endpoints. If left blank, the `endpoint_url` will connect to the normal Azure region. +<1> The `endpoint_url` parameter for Microsoft Azure storage is optional and can be used with Microsoft Azure Government (MAG) endpoints. If left blank, the `endpoint_url` will connect to the normal Microsoft Azure region. + As of {productname} 3.7, you must use the Primary endpoint of your MAG Blob service. Using the Secondary endpoint of your MAG Blob service will result in the following error: `AuthenticationErrorDetail:Cannot find the claimed account when trying to GetProperties for the account whusc8-secondary`. +[id="ceph-rados-storage-example"] == Ceph/RadosGW Storage + +Use the following example when configuring Ceph/RadosGW storage for your {productname} deployment. + [source,yaml] ---- DISTRIBUTED_STORAGE_CONFIG: @@ -75,8 +152,11 @@ DISTRIBUTED_STORAGE_PREFERENCE: #must contain name of the storage config - radosGWStorage ---- +[id="swift-storage-example"] == Swift storage +Use the following example when configuring Swift storage for your {productname} deployment. 
+ [source,yaml] ---- DISTRIBUTED_STORAGE_CONFIG: @@ -86,7 +166,10 @@ DISTRIBUTED_STORAGE_CONFIG: swift_password: swift_password_here swift_container: swift_container_here auth_url: https://example.org/swift/v1/quay - auth_version: 1 + auth_version: 3 + os_options: + tenant_id: + user_domain_name: ca_cert_path: /conf/stack/swift.cert" storage_path: /datastorage/registry DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] @@ -94,17 +177,18 @@ DISTRIBUTED_STORAGE_PREFERENCE: - swiftStorage ---- +[id="noobaa-unmanaged-storage-example"] == NooBaa unmanaged storage Use the following procedure to deploy NooBaa as your unmanaged storage configuration. .Procedure -. Create a NooBaa Object Bucket Claim in the {product-title} console by navigating to *Storage* -> *Object Bucket Claims*. +. Create a NooBaa Object Bucket Claim in the {productname} console by navigating to *Storage* -> *Object Bucket Claims*. . Retrieve the Object Bucket Claim Data details, including the Access Key, Bucket Name, Endpoint (hostname), and Secret Key. -. Create a `config.yaml` configuration file using the information for the Object Bucket Claim: +. Create a `config.yaml` configuration file that uses the information for the Object Bucket Claim: + [source,yaml] ---- diff --git a/modules/operator-upgrade.adoc b/modules/operator-upgrade.adoc index 80041a7fe..85cbc175e 100644 --- a/modules/operator-upgrade.adoc +++ b/modules/operator-upgrade.adoc @@ -1,50 +1,304 @@ -[[operator-upgrade]] -= Upgrading the Quay Operator Overview +[id="operator-upgrade"] += Upgrading the {productname} Operator Overview -The Quay Operator follows a _synchronized versioning_ scheme, which means that each version of the Operator is tied to the version of Quay and the components that it manages. There is no field on the `QuayRegistry` custom resource which sets the version of Quay to deploy; the Operator only knows how to deploy a single version of all components. 
This scheme was chosen to ensure that all components work well together and to reduce the complexity of the Operator needing to know how to manage the lifecycles of many different versions of Quay on Kubernetes. +The {productname} Operator follows a _synchronized versioning_ scheme, which means that each version of the Operator is tied to the version of {productname} and the components that it manages. There is no field on the `QuayRegistry` custom resource which sets the version of {productname} to `deploy`; the Operator can only deploy a single version of all components. This scheme was chosen to ensure that all components work well together and to reduce the complexity of the Operator needing to know how to manage the lifecycles of many different versions of {productname} on Kubernetes. +[id="operator-lifecycle-manager"] == Operator Lifecycle Manager -The Quay Operator should be installed and upgraded using the link:https://docs.openshift.com/container-platform/{producty}/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)]. When creating a `Subscription` with the default `approvalStrategy: Automatic`, OLM will automatically upgrade the Quay Operator whenever a new version becomes available. +The {productname} Operator should be installed and upgraded using the link:https://docs.openshift.com/container-platform/{ocp-y}/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)]. When creating a `Subscription` with the default `approvalStrategy: Automatic`, OLM will automatically upgrade the {productname} Operator whenever a new version becomes available. [WARNING] ==== -When the Quay Operator is installed via Operator Lifecycle Manager, it may be configured to support automatic or manual upgrades. This option is shown on the *Operator Hub* page for the Quay Operator during installation. It can also be found in the Quay Operator `Subscription` object via the `approvalStrategy` field. 
Choosing `Automatic` means that your Quay Operator will automatically be upgraded whenever a new Operator version is released. If this is not desirable, then the `Manual` approval strategy should be selected. +When the {productname} Operator is installed by Operator Lifecycle Manager, it might be configured to support automatic or manual upgrades. This option is shown on the *OperatorHub* page for the {productname} Operator during installation. It can also be found in the {productname} Operator `Subscription` object by the `approvalStrategy` field. Choosing `Automatic` means that your {productname} Operator will automatically be upgraded whenever a new Operator version is released. If this is not desirable, then the `Manual` approval strategy should be selected. ==== -== Upgrading the Quay Operator +[id="upgrading-quay-operator"] +== Upgrading the {productname} Operator -The standard approach for upgrading installed Operators on OpenShift is documented at link:https://docs.openshift.com/container-platform/4.7/operators/admin/olm-upgrading-operators.html[Upgrading installed Operators]. +The standard approach for upgrading installed Operators on {ocp} is documented at link:https://docs.openshift.com/container-platform/{ocp-y}/operators/admin/olm-upgrading-operators.html[Upgrading installed Operators]. -In general, {productname} supports upgrades from a prior (N-1) minor version only. For example, upgrading directly from {productname} 3.0.5 to the latest version of 3.5 is not supported. Instead, users would have to upgrade as follows: +In general, {productname} supports upgrades from a prior (N-1) minor version only. For example, upgrading directly from {productname} 3.5 to the latest version of {producty-min} is not supported. Instead, users would have to upgrade as follows: -. 3.0.5 -> 3.1.3 -. 3.1.3 -> 3.2.2 -. 3.2.2 -> 3.3.4 -. 3.3.4 -> 3.4.z -. 3.4.z -> 3.5.z +. 3.9.z -> 3.10.z +. 3.10.z -> 3.11.z +. 3.11.z -> 3.12.z +. 3.12.z -> 3.13.z +. 
3.13.z -> 3.14.z This is required to ensure that any necessary database migrations are done correctly and in the right order during the upgrade. -In some cases, {productname} supports direct, single-step upgrades from prior (N-2, N-3) minor versions. This exception to the normal, prior minor version-only, upgrade simplifies the upgrade procedure for customers on older releases. The following upgrade paths are supported: +In some cases, {productname} supports direct, single-step upgrades from prior (N-2, N-3) minor versions. This simplifies the upgrade procedure for customers on older releases. The following upgrade paths are supported for {productname} {productmin}: -. 3.3.z -> 3.6.z -. 3.4.z -> 3.6.z -. 3.4.z -> 3.7.z -. 3.5.z -> 3.7.z -. 3.7.z -> 3.8.z +* 3.11.z -> {productmin} +* 3.12.z -> {productmin} +* 3.13.z -> {productmin} -For users on standalone deployments of Quay wanting to upgrade to 3.8, see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#standalone_upgrade[Standalone upgrade] guide. +For users on standalone deployments of {productname} wanting to upgrade to {productmin} see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#standalone_upgrade[Standalone upgrade] guide. +[id="upgrading-red-hat-quay"] +=== Upgrading {productname} to version {productmin} -=== Upgrading Quay -To update Quay from one minor version to the next, for example, 3.4 -> 3.5, you need to change the update channel for the Quay Operator. +To update {productname} from one minor version to the next, for example, {producty-n1} -> {productmin}, you must change the update channel for the {productname} Operator. -For `z` stream upgrades, for example, 3.4.2 -> 3.4.3, updates are released in the major-minor channel that the user initially selected during install. 
The procedure to perform a `z` stream upgrade depends on the `approvalStrategy` as outlined above. If the approval strategy is set to `Automatic`, the Quay Operator will upgrade automatically to the newest `z` stream. This results in automatic, rolling Quay updates to newer `z` streams with little to no downtime. Otherwise, the update must be manually approved before installation can begin. +.Procedure -[[upgrade-33-36]] -=== Notes on upgrading directly from 3.3.z or 3.4.z to 3.6 +. In the {ocp} Web Console, navigate to *Operators* -> *Installed Operators*. +. Click on the {productname} Operator. + +. Navigate to the *Subscription* tab. + +. Under *Subscription details* click *Update channel*. + +. Select *stable-3.14* -> *Save*. + +. Check the progress of the new installation under *Upgrade status*. Wait until the upgrade status changes to *1 installed* before proceeding. + +. In your {ocp} cluster, navigate to *Workloads* -> *Pods*. Existing pods should be terminated, or in the process of being terminated. + +. Wait for the following pods, which are responsible for upgrading the database and alembic migration of existing data, to spin up: `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade`. + +. After the `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade` pods are marked as *Completed*, the remaining pods for your {productname} deployment spin up. This takes approximately ten minutes. + +. Verify that the `quay-database` uses the `postgresql-13` image, and `clair-postgres` pods now uses the `postgresql-15` image. + +. After the `quay-app` pod is marked as *Running*, you can reach your {productname} registry. + +[id="upgrading-minor-red-hat-quay"] +=== Upgrading to the next minor release version + +For `z` stream upgrades, for example, 3.13.1 -> 3.13.2, updates are released in the major-minor channel that the user initially selected during install. 
The procedure to perform a `z` stream upgrade depends on the `approvalStrategy` as outlined above. If the approval strategy is set to `Automatic`, the {productname} Operator upgrades automatically to the newest `z` stream. This results in automatic, rolling {productname} updates to newer `z` streams with little to no downtime. Otherwise, the update must be manually approved before installation can begin. + +//// +[id="upgrading-312-to-313"] +=== Upgrading from {productname} 3.12 to 3.13 + +With {productname} 3.13, the `volumeSize` parameter has been implemented for use with the `clairpostgres` component of the `QuayRegistry` custom resource definition (CRD). This replaces the `volumeSize` parameter that was previously used for the `clair` component of the same CRD. + +If your {productname} 3.12 `QuayRegistry` custom resource definition (CRD) implemented a volume override for the `clair` component, you must ensure that the `volumeSize` field is included under the `clairpostgres` component of the `QuayRegistry` CRD. + +[IMPORTANT] +==== +Failure to move `volumeSize` from the `clair` component to the `clairpostgres` component will result in a failed upgrade to version 3.13. +==== + +For example: + +[source,yaml] +---- +spec: + components: + - kind: clair + managed: true + - kind: clairpostgres + managed: true + overrides: + volumeSize: +---- + + +[id="config-editor-removal"] +== Removing config editor objects on {productname} Operator + +The config editor has been removed from the {productname} Operator on {ocp} deployments. As a result, the `quay-config-editor` pod no longer deploys, and users cannot check the status of the config editor route. Additionally, the Config Editor Endpoint no longer generates on the {productname} Operator *Details* page. 
+ +Users with existing {productname} Operators who are upgrading from 3.7, 3.8, or 3.9 to {producty} must manually remove the {productname} config editor by removing the `pod`, `deployment`, `route,` `service`, and `secret` objects. + +To remove the `deployment`, `route,` `service`, and `secret` objects, use the following procedure. + +.Prerequisites + +* You have deployed {productname} version 3.7, 3.8, or 3.9. +* You have a valid `QuayRegistry` object. + +.Procedure + +. Obtain the `quayregistry-quay-config-editor` route object by entering the following command: ++ +[source,terminal] +---- +$ oc get route +---- ++ +.Example output ++ +[source,terminal] +---- +--- +quayregistry-quay-config-editor-c866f64c4-68gtb 1/1 Running 0 49m +--- +---- + +. Remove the `quayregistry-quay-config-editor` route object by entering the following command: ++ +[source,terminal] +---- +$ oc delete route quayregistry-quay-config-editor +---- + +. Obtain the `quayregistry-quay-config-editor` deployment object by entering the following command: ++ +[source,terminal] +---- +$ oc get deployment +---- ++ +.Example output ++ +[source,terminal] +---- +--- +quayregistry-quay-config-editor +--- +---- + +. Remove the `quayregistry-quay-config-editor` deployment object by entering the following command: ++ +[source,terminal] +---- +$ oc delete deployment quayregistry-quay-config-editor +---- + +. Obtain the `quayregistry-quay-config-editor` service object by entering the following command: ++ +[source,terminal] +---- +$ oc get svc | grep config-editor +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry-quay-config-editor ClusterIP 172.30.219.194 80/TCP 6h15m +---- + +. Remove the `quayregistry-quay-config-editor` service object by entering the following command: ++ +[source,terminal] +---- +$ oc delete service quayregistry-quay-config-editor +---- + +. 
Obtain the `quayregistry-quay-config-editor-credentials` secret by entering the following command: ++ +[source,terminal] +---- +$ oc get secret | grep config-editor +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry-quay-config-editor-credentials-mb8kchfg92 Opaque 2 52m +---- + +. Delete the `quayregistry-quay-config-editor-credentials` secret by entering the following command: ++ +[source,terminal] +---- +$ oc delete secret quayregistry-quay-config-editor-credentials-mb8kchfg92 +---- + +. Obtain the `quayregistry-quay-config-editor` pod by entering the following command: ++ +[source,terminal] +---- +$ oc get pod +---- ++ +.Example output ++ +[source,terminal] +---- +--- +quayregistry-quay-config-editor-c866f64c4-68gtb 1/1 Running 0 49m +--- +---- + +. Delete the `quayregistry-quay-config-editor` pod by entering the following command: ++ +[source,terminal] +---- +$ oc delete pod quayregistry-quay-config-editor-c866f64c4-68gtb +---- + +[id="upgrading-postgresql-databases"] +=== Updating {productname} from 3.8 -> 3.9 + +[IMPORTANT] +==== +If your {productname} deployment is upgrading from one y-stream to the next, for example, from 3.8.10 -> 3.8.11, you must not switch the upgrade channel from `stable-3.8` to `stable-3.9`. Changing the upgrade channel in the middle of a y-stream upgrade will disallow {productname} from upgrading to 3.9. This is a known issue and will be fixed in a future version of {productname}. +==== + +When updating {productname} 3.8 -> 3.9, the Operator automatically upgrades the existing PostgreSQL databases for Clair and {productname} from version 10 to version 13. + +[IMPORTANT] +==== +* This upgrade is irreversible. It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. 
+* By default, {productname} is configured to remove old persistent volume claims (PVCs) from PostgreSQL 10. To disable this setting and backup old PVCs, you must set `POSTGRES_UPGRADE_RETAIN_BACKUP` to `True` in your `quay-operator` `Subscription` object. +==== + +.Prerequisites + +* You have installed {productname} 3.8 on {ocp}. +* 100 GB of free, additional storage. ++ +During the upgrade process, additional persistent volume claims (PVCs) are provisioned to store the migrated data. This helps prevent a destructive operation on user data. The upgrade process rolls out PVCs for 50 GB for both the {productname} database upgrade, and the Clair database upgrade. + +.Procedure + +. Optional. Back up your old PVCs from PostgreSQL 10 by setting `POSTGRES_UPGRADE_RETAIN_BACKUP` to `True` your `quay-operator` `Subscription` object. For example: ++ +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.8 + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_RETAIN_BACKUP + value: "true" +---- + +. In the {ocp} Web Console, navigate to *Operators* -> *Installed Operators*. + +. Click on the {productname} Operator. + +. Navigate to the *Subscription* tab. + +. Under *Subscription details* click *Update channel*. + +. Select *stable-3.9* and save the changes. + +. Check the progress of the new installation under *Upgrade status*. Wait until the upgrade status changes to *1 installed* before proceeding. + +. In your {ocp} cluster, navigate to *Workloads* -> *Pods*. Existing pods should be terminated, or in the process of being terminated. + +. Wait for the following pods, which are responsible for upgrading the database and alembic migration of existing data, to spin up: `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade`. + +. 
After the `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade` pods are marked as *Completed*, the remaining pods for your {productname} deployment spin up. This takes approximately ten minutes. + +. Verify that the `quay-database` and `clair-postgres` pods now use the `postgresql-13` image. + +. After the `quay-app` pod is marked as *Running*, you can reach your {productname} registry. + + +[id="upgrade-33-36"] +=== Upgrading directly from 3.3.z or 3.4.z to 3.6 + +The following section provides important information when upgrading from {productname} 3.3.z or 3.4.z to 3.6. + +[id="upgrading-edge-routing-enabled"] ==== Upgrading with edge routing enabled * Previously, when running a 3.3.z version of {productname} with edge routing enabled, users were unable to upgrade to 3.4.z versions of {productname}. This has been resolved with the release of {productname} 3.6. @@ -71,23 +325,30 @@ spec: ... ---- -==== Upgrading with custom TLS certificate/key pairs without Subject Alternative Names -There is an issue for customers using their own TLS certificate/key pairs without Subject Alternative Names (SANs) when upgrading from {productname} 3.3.4 to {productname} 3.6 directly. During the upgrade to {productname} 3.6, the deployment is blocked, with the error message from the Quay Operator pod logs indicating that the Quay TLS certificate must have SANs. +[id="upgrading-with-tls-cert-key-pairs-without-san"] +==== Upgrading with custom SSL/TLS certificate/key pairs without Subject Alternative Names + +There is an issue for customers using their own SSL/TLS certificate/key pairs without Subject Alternative Names (SANs) when upgrading from {productname} 3.3.4 to {productname} 3.6 directly. During the upgrade to {productname} 3.6, the deployment is blocked, with the error message from the {productname} Operator pod logs indicating that the {productname} SSL/TLS certificate must have SANs. 
-If possible, you should regenerate your TLS certificates with the correct hostname in the SANs. A possible workaround involves defining an environment variable in the `quay-app`, `quay-upgrade` and `quay-config-editor` pods after upgrade to enable CommonName matching: +If possible, you should regenerate your SSL/TLS certificates with the correct hostname in the SANs. A possible workaround involves defining an environment variable in the `quay-app`, `quay-upgrade` and `quay-config-editor` pods after upgrade to enable CommonName matching: -``` +---- GODEBUG=x509ignoreCN=0 -``` +---- -The `GODEBUG=x509ignoreCN=0` flag enables the legacy behavior of treating the CommonName field on X.509 certificates as a host name when no SANs are present. However, this workaround is not recommended, as it will not persist across a redeployment. +The `GODEBUG=x509ignoreCN=0` flag enables the legacy behavior of treating the CommonName field on X.509 certificates as a hostname when no SANs are present. However, this workaround is not recommended, as it will not persist across a redeployment. -==== Configuring Clair v4 when upgrading from 3.3.z or 3.4.z to 3.6 using the Quay Operator -To set up Clair v4 on a new {productname} deployment on OpenShift, it is highly recommended to use the Quay Operator. By default, the Quay Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically. -For instructions on setting up Clair v4 on OpenShift, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-openshift[Setting Up Clair on a {productname} OpenShift deployment]. +[id="configuring-clair-v4-upgrading-from-33-34-to-36"] +==== Configuring Clair v4 when upgrading from 3.3.z or 3.4.z to 3.6 using the {productname} Operator +To set up Clair v4 on a new {productname} deployment on {ocp}, it is highly recommended to use the {productname} Operator. 
By default, the {productname} Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically. + +//link needs replaced +For instructions about setting up Clair v4 in a disconnected {ocp} cluster, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-openshift[Setting Up Clair on a {productname} OpenShift deployment]. + +[id="swift-config-upgrading-from-33-to-36"] === Swift configuration when upgrading from 3.3.z to 3.6 When upgrading from {productname} 3.3.z to 3.6.z, some users might receive the following error: `Switch auth v3 requires tenant_id (string) in os_options`. As a workaround, you can manually update your `DISTRIBUTED_STORAGE_CONFIG` to add the `os_options` and `tenant_id` parameters: @@ -108,14 +369,17 @@ When upgrading from {productname} 3.3.z to 3.6.z, some users might receive the f swift_password: ***** swift_user: ***** ---- +//// -=== Changing the update channel for an Operator +[id="changing-update-channel-for-operator"] +=== Changing the update channel for the {productname} Operator -The subscription of an installed Operator specifies an update channel, which is used to track and receive updates for the Operator. To upgrade the Quay Operator to start tracking and receiving updates from a newer channel, change the update channel in the *Subscription* tab for the installed Quay Operator. For subscriptions with an `Automatic` approval strategy, the upgrade begins automatically and can be monitored on the page that lists the Installed Operators. +The subscription of an installed Operator specifies an update channel, which is used to track and receive updates for the Operator. To upgrade the {productname} Operator to start tracking and receiving updates from a newer channel, change the update channel in the *Subscription* tab for the installed {productname} Operator. 
For subscriptions with an `Automatic` approval strategy, the upgrade begins automatically and can be monitored on the page that lists the Installed Operators. +[id="manually-approving-pending-operator-upgrade"] === Manually approving a pending Operator upgrade -If an installed Operator has the approval strategy in its subscription set to `Manual`, when new updates are released in its current update channel, the update must be manually approved before installation can begin. If the Quay Operator has a pending upgrade, this status will be displayed in the list of Installed Operators. In the `Subscription` tab for the Quay Operator, you can preview the install plan and review the resources that are listed as available for upgrade. If satisfied, click `Approve` and return to the page that lists Installed Operators to monitor the progress of the upgrade. +If an installed Operator has the approval strategy in its subscription set to `Manual`, when new updates are released in its current update channel, the update must be manually approved before installation can begin. If the {productname} Operator has a pending upgrade, this status will be displayed in the list of Installed Operators. In the `Subscription` tab for the {productname} Operator, you can preview the install plan and review the resources that are listed as available for upgrade. If satisfied, click `Approve` and return to the page that lists Installed Operators to monitor the progress of the upgrade. 
The following image shows the *Subscription* tab in the UI, including the update `Channel`, the `Approval` strategy, the `Upgrade status` and the `InstallPlan`: @@ -125,23 +389,28 @@ The list of Installed Operators provides a high-level summary of the current Qua image:installed-operators-list.png[Installed Operators] -== Upgrading a QuayRegistry +[id="upgrading-quayregistry"] +== Upgrading a QuayRegistry resource -When the Quay Operator starts, it immediately looks for any `QuayRegistries` it can find in the namespace(s) it is configured to watch. When it finds one, the following logic is used: +When the {productname} Operator starts, it immediately looks for any `QuayRegistries` it can find in the namespace(s) it is configured to watch. When it finds one, the following logic is used: * If `status.currentVersion` is unset, reconcile as normal. * If `status.currentVersion` equals the Operator version, reconcile as normal. * If `status.currentVersion` does not equal the Operator version, check if it can be upgraded. If it can, perform upgrade tasks and set the `status.currentVersion` to the Operator's version once complete. If it cannot be upgraded, return an error and leave the `QuayRegistry` and its deployed Kubernetes objects alone. +[id="upgrading-quayecosystem"] == Upgrading a QuayEcosystem -Upgrades are supported from previous versions of the Operator which used the `QuayEcosystem` API for a limited set of configurations. To ensure that migrations do not happen unexpectedly, a special label needs to be applied to the `QuayEcosystem` for it to be migrated. A new `QuayRegistry` will be created for the Operator to manage, but the old `QuayEcosystem` will remain until manually deleted to ensure that you can roll back and still access Quay in case anything goes wrong. 
To migrate an existing `QuayEcosystem` to a new `QuayRegistry`, follow these steps: +Upgrades are supported from previous versions of the Operator which used the `QuayEcosystem` API for a limited set of configurations. To ensure that migrations do not happen unexpectedly, a special label needs to be applied to the `QuayEcosystem` for it to be migrated. A new `QuayRegistry` will be created for the Operator to manage, but the old `QuayEcosystem` will remain until manually deleted to ensure that you can roll back and still access Quay in case anything goes wrong. To migrate an existing `QuayEcosystem` to a new `QuayRegistry`, use the following procedure. + +.Procedure . Add `"quay-operator/migrate": "true"` to the `metadata.labels` of the `QuayEcosystem`. + -``` +[source,terminal] +---- $ oc edit quayecosystem -``` +---- + [source,yaml] ---- @@ -151,30 +420,35 @@ metadata: ---- . Wait for a `QuayRegistry` to be created with the same `metadata.name` as your `QuayEcosystem`. The `QuayEcosystem` will be marked with the label `"quay-operator/migration-complete": "true"`. -. Once the `status.registryEndpoint` of the new `QuayRegistry` is set, access Quay and confirm all data and settings were migrated successfully. +. After the `status.registryEndpoint` of the new `QuayRegistry` is set, access {productname} and confirm that all data and settings were migrated successfully. -. When you are confident everything worked correctly, you may delete the `QuayEcosystem` and Kubernetes garbage collection will clean up all old resources. +. If everything works correctly, you can delete the `QuayEcosystem` and Kubernetes garbage collection will clean up all old resources. +[id="reverting-quayecosystem-upgrade"] === Reverting QuayEcosystem Upgrade If something goes wrong during the automatic upgrade from `QuayEcosystem` to `QuayRegistry`, follow these steps to revert back to using the `QuayEcosystem`: +.Procedure + . 
Delete the `QuayRegistry` using either the UI or `kubectl`: + -```sh +[source,terminal] +---- $ kubectl delete -n quayregistry -``` +---- . If external access was provided using a `Route`, change the `Route` to point back to the original `Service` using the UI or `kubectl`. [NOTE] ==== -If your `QuayEcosystem` was managing the Postgres database, the upgrade process will migrate your data to a new Postgres database managed by the upgraded Operator. Your old database will not be changed or removed but Quay will no longer use it once the migration is complete. If there are issues during the data migration, the upgrade process will exit and it is recommended that you continue with your database as an unmanaged component. +If your `QuayEcosystem` was managing the PostgreSQL database, the upgrade process will migrate your data to a new PostgreSQL database managed by the upgraded Operator. Your old database will not be changed or removed but {productname} will no longer use it once the migration is complete. If there are issues during the data migration, the upgrade process will exit and it is recommended that you continue with your database as an unmanaged component. ==== +[id="supported-quayecossytem-configurations-for-upgrades"] === Supported QuayEcosystem Configurations for Upgrades -The Quay Operator will report errors in its logs and in `status.conditions` if migrating a `QuayEcosystem` component fails or is unsupported. All unmanaged components should migrate successfully because no Kubernetes resources need to be adopted and all the necessary values are already provided in Quay's `config.yaml`. +The {productname} Operator reports errors in its logs and in `status.conditions` if migrating a `QuayEcosystem` component fails or is unsupported. All unmanaged components should migrate successfully because no Kubernetes resources need to be adopted and all the necessary values are already provided in {productname}'s `config.yaml` file. 
*Database* diff --git a/modules/operator-volume-size-overrides.adoc b/modules/operator-volume-size-overrides.adoc index c63296c33..b676d5cd2 100644 --- a/modules/operator-volume-size-overrides.adoc +++ b/modules/operator-volume-size-overrides.adoc @@ -1,8 +1,8 @@ -[[operator-volume-size-overrides]] +:_content-type: REFERENCE +[id="operator-volume-size-overrides"] = Volume size overrides -As of {productname} v3.6.2, you can specify the desired size of storage resources provisioned for managed components. The default size for Clair and Quay PostgreSQL databases is `50Gi`. You can now choose a large enough capacity upfront, either for performance reasons or in the case where your storage backend does not have resize capability. - +You can specify the desired size of storage resources provisioned for managed components. The default size for Clair and the PostgreSQL databases is `50Gi`. You can now choose a large enough capacity upfront, either for performance reasons or in the case where your storage backend does not have resize capability. In the following example, the volume size for the Clair and the Quay PostgreSQL databases has been set to `70Gi`: @@ -29,5 +29,9 @@ spec: - kind: postgres managed: true overrides: - volumeSize: 70Gi + volumeSize: 70Gi + - kind: clairpostgres + managed: true + overrides: + volumeSize: 70Gi ---- \ No newline at end of file diff --git a/modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc b/modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc new file mode 100644 index 000000000..faa78255c --- /dev/null +++ b/modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc @@ -0,0 +1,304 @@ +:_content-type: PROCEDURE +[id="optional-enabling-read-only-mode-backup-restore-ocp"] += Optional: Enabling read-only mode for {productname-ocp} + +Enabling read-only mode for your {productname-ocp} deployment allows you to manage the registry's operations. 
Administrators can enable read-only mode to restrict write access to the registry, which helps ensure data integrity, mitigate risks during maintenance windows, and provide a safeguard against unintended modifications to registry data. It also helps to ensure that your {productname} registry remains online and available to serve images to users. + +When backing up and restoring, you are required to scale down your {productname-ocp} deployment. This results in service unavailability during the backup period which, in some cases, might be unacceptable. Enabling read-only mode ensures service availability during the backup and restore procedure for {productname-ocp} deployments. + +[NOTE] +==== +In some cases, a read-only option for {productname} is not possible since it requires inserting a service key and other manual configuration changes. As an alternative to read-only mode, {productname} administrators might consider enabling the `DISABLE_PUSHES` feature. When this field is set to `true`, users are unable to push images or image tags to the registry when using the CLI. Enabling `DISABLE_PUSHES` differs from `read-only` mode because the database is not set as `read-only` when it is enabled. + +This field might be useful in some situations such as when {productname} administrators want to calculate their registry's quota and disable image pushing until after calculation has completed. With this method, administrators can avoid putting the whole registry in `read-only` mode, which affects the database, so that most operations can still be done. + +For information about enabling this configuration field, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-misc[Miscellaneous configuration fields]. +==== + +.Prerequisites + +* If you are using {rhel} 7.x: +** You have enabled the Red Hat Software Collections List (RHSCL). +** You have installed Python 3.6. 
+** You have downloaded the `virtualenv` package. +** You have installed the `git` CLI. + +* If you are using {rhel} 8: +** You have installed Python 3 on your machine. +** You have downloaded the `python3-virtualenv` package. +** You have installed the `git` CLI. + +* You have cloned the `https://github.com/quay/quay.git` repository. +* You have installed the `oc` CLI. +* You have access to the cluster with `cluster-admin` privileges. + +[id="creating-service-keys-quay-ocp"] +== Creating service keys for {productname-ocp} + +{productname} uses service keys to communicate with various components. These keys are used to sign completed requests, such as requesting to scan images, login, storage access, and so on. + +.Procedure + +. Enter the following command to obtain a list of {productname} pods: ++ +[source,terminal] +---- +$ oc get pods -n +---- ++ +Example output ++ +[source,terminal] +---- +example-registry-clair-app-7dc7ff5844-4skw5 0/1 Error 0 70d +example-registry-clair-app-7dc7ff5844-nvn4f 1/1 Running 0 31d +example-registry-clair-app-7dc7ff5844-x4smw 0/1 ContainerStatusUnknown 6 (70d ago) 70d +example-registry-clair-app-7dc7ff5844-xjnvt 1/1 Running 0 60d +example-registry-clair-postgres-547d75759-75c49 1/1 Running 0 70d +example-registry-quay-app-76c8f55467-52wjz 1/1 Running 0 70d +example-registry-quay-app-76c8f55467-hwz4c 1/1 Running 0 70d +example-registry-quay-app-upgrade-57ghs 0/1 Completed 1 70d +example-registry-quay-database-7c55899f89-hmnm6 1/1 Running 0 70d +example-registry-quay-mirror-6cccbd76d-btsnb 1/1 Running 0 70d +example-registry-quay-mirror-6cccbd76d-x8g42 1/1 Running 0 70d +example-registry-quay-redis-85cbdf96bf-4vk5m 1/1 Running 0 70d +---- + +. Open a remote shell session to the `Quay` container by entering the following command: ++ +[source,terminal] +---- +$ oc rsh example-registry-quay-app-76c8f55467-52wjz +---- + +. 
Enter the following command to create the necessary service keys: ++ +[source,terminal] +---- +sh-4.4$ python3 tools/generatekeypair.py quay-readonly +---- ++ +Example output ++ +[source,terminal] +---- +Writing public key to quay-readonly.jwk +Writing key ID to quay-readonly.kid +Writing private key to quay-readonly.pem +---- + +[id="adding-keys-postgresql-database"] +== Adding keys to the PostgreSQL database + +Use the following procedure to add your service keys to the PostgreSQL database. + +.Prerequistes + +* You have created the service keys. + +.Procedure + +. Enter the following command to enter your {productname} database environment: ++ +[source,terminal] +---- +$ oc rsh example-registry-quay-app-76c8f55467-52wjz psql -U -d +---- + +. Display the approval types and associated notes of the `servicekeyapproval` by entering the following command: ++ +[source,terminal] +---- +quay=# select * from servicekeyapproval; +---- ++ +Example output ++ +[source,terminal] +---- + id | approver_id | approval_type | approved_date | notes +----+-------------+----------------------------------+----------------------------+------- + 1 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:47:48.181347 | + 2 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:47:55.808087 | + 3 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:49:04.27095 | + 4 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:49:05.46235 | + 5 | 1 | ServiceKeyApprovalType.SUPERUSER | 2024-05-07 04:05:10.296796 | +... +---- + +. Add the service key to your {productname} database by entering the following query: ++ +[source,terminal] +---- +quay=# INSERT INTO servicekey + (name, service, metadata, kid, jwk, created_date, expiration_date) + VALUES ('quay-readonly', + 'quay', + '{}', + '{}', + '{}', + '{}', + '{}'); +---- ++ +Example output ++ +[source,terminal] +---- +INSERT 0 1 +---- + +. 
Next, add the key approval with the following query: ++ +[source,terminal] +---- +quay=# INSERT INTO servicekeyapproval ('approval_type', 'approved_date', 'notes') + VALUES ("ServiceKeyApprovalType.SUPERUSER", "CURRENT_DATE", + {include_notes_here_on_why_this_is_being_added}); +---- ++ +Example output ++ +[source,terminal] +---- +INSERT 0 1 +---- + +. Set the `approval_id` field on the created service key row to the `id` field from the created service key approval. You can use the following `SELECT` statements to get the necessary IDs: ++ +[source,terminal] +---- +UPDATE servicekey +SET approval_id = (SELECT id FROM servicekeyapproval WHERE approval_type = 'ServiceKeyApprovalType.SUPERUSER') +WHERE name = 'quay-readonly'; +---- ++ +[source,terminal] +---- +UPDATE 1 +---- + +[id="quay-ocp-readonly"] +== Configuring read-only mode {productname-ocp} + +After the service keys have been created and added to your PostgreSQL database, you must restart the `Quay` container on your {ocp} deployment. + +[IMPORTANT] +==== +Deploying {productname-ocp} in read-only mode requires you to modify the secrets stored inside of your {ocp} cluster. It is highly recommended that you create a backup of the secret prior to making changes to it. +==== + +.Prerequisites + +* You have created the service keys and added them to your PostgreSQL database. + +.Procedure + +. Enter the following command to read the secret name of your {productname-ocp} deployment: ++ +[source,terminal] +---- +$ oc get deployment -o yaml +---- + +. Use the `base64` command to encode the `quay-readonly.kid` and `quay-readonly.pem` files: ++ +[source,terminal] +---- +$ base64 -w0 quay-readonly.kid +---- ++ +Example output ++ +[source,terminal] +---- +ZjUyNDFm... +---- ++ +[source,terminal] +---- +$ base64 -w0 quay-readonly.pem +---- ++ +Example output ++ +[source,terminal] +---- +LS0tLS1CRUdJTiBSU0E... +---- + +. 
Obtain the current configuration bundle and secret by entering the following command: ++ +[source,terminal] +---- +$ oc get secret quay-config-secret-name -o json | jq '.data."config.yaml"' | cut -d '"' -f2 | base64 -d -w0 > config.yaml +---- + +. Edit the `config.yaml` file and add the following information: ++ +[source,yaml] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- + +. Save the file and `base64` encode it by running the following command: ++ +[source,terminal] +---- +$ base64 -w0 config.yaml +---- + +. Scale down the {productname} Operator pods to `0`. This ensures that the Operator does not reconcile the secret after editing it. ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment quay-operator -n openshift-operators +---- + +. Edit the secret to include the new content: ++ +[source,terminal] +---- +$ oc edit secret quay-config-secret-name -n quay-namespace +---- ++ +[source,yaml] +---- +# ... +data: + "quay-readonly.kid": "ZjUyNDFm..." + "quay-readonly.pem": "LS0tLS1CRUdJTiBSU0E..." + "config.yaml": "QUNUSU9OX0xPR19..." +# ... +---- ++ +With your {productname-ocp} deployment in read-only mode, you can safely manage your registry's operations and perform such actions as backup and restore. + +[id="scaling-up-quay-ocp-read-only-deployment"] +=== Scaling up the {productname-ocp} from a read-only deployment + +When you no longer want {productname-ocp} to be in read-only mode, you can scale the deployment back up and remove the content added from the secret. + +.Procedure + +. Edit the `config.yaml` file and remove the following information: ++ +[source,yaml] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- + +. 
Scale the {productname} Operator back up by entering the following command: ++ +[source,terminal] +---- +oc scale --replicas=1 deployment quay-operator -n openshift-operators +---- \ No newline at end of file diff --git a/modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc b/modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc new file mode 100644 index 000000000..b46ec1afa --- /dev/null +++ b/modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc @@ -0,0 +1,315 @@ +:_content-type: PROCEDURE +[id="optional-enabling-read-only-mode-backup-restore-standalone"] += Optional: Enabling read-only mode for {productname} + +Enabling read-only mode for your {productname} deployment allows you to manage the registry's operations. {productname} administrators can enable read-only mode to restrict write access to the registry, which helps ensure data integrity, mitigate risks during maintenance windows, and provide a safeguard against unintended modifications to registry data. It also helps to ensure that your {productname} registry remains online and available to serve images to users. + +[NOTE] +==== +In some cases, a read-only option for {productname} is not possible since it requires inserting a service key and other manual configuration changes. As an alternative to read-only mode, {productname} administrators might consider enabling the `DISABLE_PUSHES` feature. When this field is set to `true`, users are unable to push images or image tags to the registry when using the CLI. Enabling `DISABLE_PUSHES` differs from `read-only` mode because the database is not set as `read-only` when it is enabled. + +This field might be useful in some situations such as when {productname} administrators want to calculate their registry's quota and disable image pushing until after calculation has completed. 
With this method, administrators can avoid putting the whole registry in `read-only` mode, which affects the database, so that most operations can still be done. + +For information about enabling this configuration field, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-misc[Miscellaneous configuration fields]. +==== + +.Prerequisites + +* If you are using {rhel} 7.x: +** You have enabled the Red Hat Software Collections List (RHSCL). +** You have installed Python 3.6. +** You have downloaded the `virtualenv` package. +** You have installed the `git` CLI. + +* If you are using {rhel} 8: +** You have installed Python 3 on your machine. +** You have downloaded the `python3-virtualenv` package. +** You have installed the `git` CLI. + +* You have cloned the `https://github.com/quay/quay.git` repository. + +[id="creating-service-keys"] +== Creating service keys for standalone {productname} + +{productname} uses service keys to communicate with various components. These keys are used to sign completed requests, such as requesting to scan images, login, storage access, and so on. + +.Procedure + +. If your {productname} registry is readily available, you can generate service keys inside of the `Quay` registry container. + +.. Enter the following command to generate a key pair inside of the `Quay` container: ++ +[source,terminal] +---- +$ podman exec quay python3 tools/generatekeypair.py quay-readonly +---- + +. If your {productname} is not readily available, you must generate your service keys inside of a virtual environment. + +.. Change into the directory of your {productname} deployment and create a virtual environment inside of that directory: ++ +[source,terminal] +---- +$ cd <$QUAY>/quay && virtualenv -v venv +---- + +.. Activate the virtual environment by entering the following command: ++ +[source,terminal] +---- +$ source venv/bin/activate +---- + +.. Optional. 
Install the `pip` CLI tool if you do not have it installed: ++ +[source,terminal] +---- +$ venv/bin/pip install --upgrade pip +---- + +.. In your {productname} directory, create a `requirements-generatekeys.txt` file with the following content: ++ +[source,terminal] +---- +$ cat << EOF > requirements-generatekeys.txt +cryptography==3.4.7 +pycparser==2.19 +pycryptodome==3.9.4 +pycryptodomex==3.9.4 +pyjwkest==1.4.2 +PyJWT==1.7.1 +Authlib==1.0.0a2 +EOF +---- + +.. Enter the following command to install the Python dependencies defined in the `requirements-generatekeys.txt` file: ++ +[source,terminal] +---- +$ venv/bin/pip install -r requirements-generatekeys.txt +---- + +.. Enter the following command to create the necessary service keys: ++ +[source,terminal] +---- +$ PYTHONPATH=. venv/bin/python //tools/generatekeypair.py quay-readonly +---- ++ +Example output ++ +[source,terminal] +---- +Writing public key to quay-readonly.jwk +Writing key ID to quay-readonly.kid +Writing private key to quay-readonly.pem +---- + +.. Enter the following command to deactivate the virtual environment: ++ +[source,terminal] +---- +$ deactivate +---- + +[id="adding-keys-postgresql-database"] +== Adding keys to the PostgreSQL database + +Use the following procedure to add your service keys to the PostgreSQL database. + +.Prerequistes + +* You have created the service keys. + +.Procedure + +. Enter the following command to enter your {productname} database environment: ++ +[source,terminal] +---- +$ podman exec -it postgresql-quay psql -U postgres -d quay +---- + +. 
Display the approval types and associated notes of the `servicekeyapproval` by entering the following command: ++ +[source,terminal] +---- +quay=# select * from servicekeyapproval; +---- ++ +Example output ++ +[source,terminal] +---- + id | approver_id | approval_type | approved_date | notes +----+-------------+----------------------------------+----------------------------+------- + 1 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:47:48.181347 | + 2 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:47:55.808087 | + 3 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:49:04.27095 | + 4 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:49:05.46235 | + 5 | 1 | ServiceKeyApprovalType.SUPERUSER | 2024-05-07 04:05:10.296796 | +... +---- + +. Add the service key to your {productname} database by entering the following query: ++ +[source,terminal] +---- +quay=# INSERT INTO servicekey + (name, service, metadata, kid, jwk, created_date, expiration_date) + VALUES ('quay-readonly', + 'quay', + '{}', + '{}', + '{}', + '{}', + '{}'); +---- ++ +Example output ++ +[source,terminal] +---- +INSERT 0 1 +---- + +. Next, add the key approval with the following query: ++ +[source,terminal] +---- +quay=# INSERT INTO servicekeyapproval ('approval_type', 'approved_date', 'notes') + VALUES ("ServiceKeyApprovalType.SUPERUSER", "CURRENT_DATE", + {include_notes_here_on_why_this_is_being_added}); +---- ++ +Example output ++ +[source,terminal] +---- +INSERT 0 1 +---- + +. Set the `approval_id` field on the created service key row to the `id` field from the created service key approval. 
You can use the following `SELECT` statements to get the necessary IDs: ++ +[source,terminal] +---- +UPDATE servicekey +SET approval_id = (SELECT id FROM servicekeyapproval WHERE approval_type = 'ServiceKeyApprovalType.SUPERUSER') +WHERE name = 'quay-readonly'; +---- ++ +[source,terminal] +---- +UPDATE 1 +---- + +[id="quay-standalone-readonly"] +== Configuring read-only mode for standalone {productname} + +After the service keys have been created and added to your PostgreSQL database, you must restart the `Quay` container on your standalone deployment. + +.Prerequisites + +* You have created the service keys and added them to your PostgreSQL database. + +.Procedure + +. Shutdown all {productname} instances on all virtual machines. For example: ++ +[source,terminal] +---- +$ podman stop +---- ++ +[source,terminal] +---- +$ podman stop +---- + +. Enter the following command to copy the contents of the `quay-readonly.kid` file and the `quay-readonly.pem` file to the directory that holds your {productname} configuration bundle: ++ +[source,terminal] +---- +$ cp quay-readonly.kid quay-readonly.pem $Quay/config +---- + +. Enter the following command to set file permissions on all files in your configuration bundle folder: ++ +[source,terminal] +---- +$ setfacl -m user:1001:rw $Quay/config/* +---- + +. Modify your {productname} `config.yaml` file and add the following information: ++ +[source,terminal] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- + +. Distribute the new configuration bundle to all {productname} instances. + +. Start {productname} by entering the following command: ++ +[source,terminal] +---- +$ podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay-main-app \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- + +. 
After starting {productname}, a banner in your instance informs users that {productname} is running in read-only mode. Pushes should be rejected and a 405 error should be logged. You can test this by running the following command: ++ +[source,terminal] +---- +$ podman push /quayadmin/busybox:test +---- ++ +Example output ++ +[source,terminal] +---- +613be09ab3c0: Preparing +denied: System is currently read-only. Pulls will succeed but all write operations are currently suspended. +---- ++ +With your {productname} deployment in read-only mode, you can safely manage your registry's operations and perform such actions as backup and restore. + +. Optional. After you are finished with read-only mode, you can return to normal operations by removing the following information from your `config.yaml` file. Then, restart your {productname} deployment: ++ +[source,terminal] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- ++ +[source,terminal] +---- +$ podman restart +---- + +[id="updating-read-only-expiration-time"] +== Updating read-only expiration time + +The {productname} read-only key has an expiration date, and when that date passes the key is deactivated. Before the key expires, its expiration time can be updated in the database. 
To update the key, connect your {productname} production database using the methods described earlier and issue the following query: + +[source,terminal] +---- +quay=# UPDATE servicekey SET expiration_date = 'new-date' WHERE id = servicekey_id; +---- + +The list of service key IDs can be obtained by running the following query: + +[source,terminal] +---- +SELECT id, name, expiration_date FROM servicekey; +---- \ No newline at end of file diff --git a/modules/oras-annotation-parsing.adoc b/modules/oras-annotation-parsing.adoc new file mode 100644 index 000000000..9817f282d --- /dev/null +++ b/modules/oras-annotation-parsing.adoc @@ -0,0 +1,166 @@ +:_content-type: CONCEPT +[id="annotation-parsing-oras"] += Annotation parsing + +Some OCI media types do not utilize labels and, as such, critical information such as expiration timestamps are not included. {productname} supports metadata passed through annotations to accommodate OCI media types that do not include these labels for metadata transmission. Tools such as ORAS (OCI Registry as Storage) can now be used to embed information with artifact types to help ensure that images operate properly, for example, to expire. + +The following procedure uses ORAS to add an expiration date to an OCI media artifact. + +[IMPORTANT] +==== +If you pushed an image with `podman push`, and then add an annotation with `oras`, the MIME type is changed. Consequently, you will not be able to pull the same image with `podman pull` because Podman does not recognize that MIME type. +==== + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. +* You have pushed an OCI media artifact to your {productname} repository. + +//// +.Procedure + +. Create an example artifact: ++ +[source,terminal] +---- +$ cat < Dockerfile +FROM alpine +CMD echo 'hello world!' +EOF +---- + +. Create an artifact directory: ++ +[source,terminal] +---- +$ mkdir +---- + +. 
Build the Docker image by using Podman. For example: ++ +[source,terminal] +---- +$ podman build -t . +---- ++ +.Example output ++ +[source,terminal] +---- +STEP 1/2: FROM alpine +STEP 2/2: CMD echo 'hello world!' +--> Using cache a5c9903200482a77ad9be3234962d2eac4dfef0b35d08eb4e966bf0125e0679b +COMMIT myartifact-image +--> a5c990320048 +Successfully tagged localhost/myartifact-image:latest +Successfully tagged localhost/hello-world:v1 +a5c9903200482a77ad9be3234962d2eac4dfef0b35d08eb4e966bf0125e0679b +---- + +. Confirm that the image has been built: ++ +[source,terminal] +---- +$ podman images +---- ++ +.Example output ++ +[source,terminal] +---- +REPOSITORY TAG IMAGE ID CREATED SIZE +localhost/ latest a5c990320048 18 hours ago 8.08 MB +---- + +. Convert the image to an OCI artifact. For example: ++ +[source,terminal] +---- +$ podman save --format oci-archive -o myartifact-image.tar +---- + +. Inspect the image to ensure that it follows the OCI format: ++ +[source,terminal] +---- +$ podman inspect myartifact-image +---- ++ +.Example output ++ +[source,terminal] +---- +--- +[ + { + "Id": "a5c9903200482a77ad9be3234962d2eac4dfef0b35d08eb4e966bf0125e0679b", + "Digest": "sha256:cc9c20f447dfd2b30019a44290d451a2edc5dec51736d29b5697c23fe7e55afb", + "RepoTags": [ + "localhost/myartifact-image:latest", + "localhost/hello-world:v1" +--- +---- + +. Tag the image by entering the following command: ++ +[source,terminal] +---- +$ podman tag quay.io///: +---- + +. Push the artifact to your {productname} registry. For example: ++ +[source,terminal] +---- +$ podman push quay.io///: +---- +//// + +.Procedure + +* By default, some OCI media types, like `application/vnd.oci.image.manifest.v1+json`, do not use certain labels, like expiration timestamps. You can use a CLI tool like ORAS (`oras`) to add annotations to OCI media types. 
For example: ++ +[source,terminal] +---- +$ oras push --annotation "quay.expires-after=2d" \ <1> +--annotation "expiration = 2d" \ <2> +quay.io///: +---- +<1> Set the expiration time for 2 days, indicated by `2d`. +<2> Adds the expiration label. ++ +.Example output ++ +[source,terminal] +---- +✓ Exists application/vnd.oci.empty.v1+json 2/2 B 100.00% 0s + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 561/561 B 100.00% 511ms + └─ sha256:9b4f2d43b62534423894d077f0ff0e9e496540ec8b52b568ea8b757fc9e7996b +Pushed [registry] quay.io/stevsmit/testorg3/oci-image:v1 +ArtifactType: application/vnd.unknown.artifact.v1 +Digest: sha256:9b4f2d43b62534423894d077f0ff0e9e496540ec8b52b568ea8b757fc9e7996b +---- + +.Verification + +. Pull the image with `oras`. For example: ++ +[source,terminal] +---- +$ oras pull quay.io///: +---- + +. Inspect the changes using `oras`. For example: ++ +[source,terminal] +---- +$ oras manifest fetch quay.io///: +---- ++ +.Example output ++ +[source,terminal] +---- +{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json","artifactType":"application/vnd.unknown.artifact.v1","config":{"mediaType":"application/vnd.oci.empty.v1+json","digest":"sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a","size":2,"data":"e30="},"layers":[{"mediaType":"application/vnd.oci.empty.v1+json","digest":"sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a","size":2,"data":"e30="}],"annotations":{"org.opencontainers.image.created":"2024-07-11T15:22:42Z","version ":" 8.11"}} +---- \ No newline at end of file diff --git a/modules/org-application-create-api.adoc b/modules/org-application-create-api.adoc new file mode 100644 index 000000000..a01f30c8d --- /dev/null +++ b/modules/org-application-create-api.adoc @@ -0,0 +1,110 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT 
+[id="org-application-create-api"] += Creating an organization application by using the {productname} API + +Organization applications can be created by using the {productname} API. + +[NOTE] +==== +Organization applications can be created by using the API, however OAuth 2 access tokens must be created on the UI. +==== + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationapplication[`POST /api/v1/organization/{orgname}/applications`] endpoint to create a new application for your organization. For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "redirect_uri": "", + "application_uri": "", + "description": "", + "avatar_email": "" + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "new-application", "description": "", "application_uri": "", "client_id": "E6GJSHOZMFBVNHTHNB53", "client_secret": "SANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI", "redirect_uri": "", "avatar_email": null} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationapplications[`GET /api/v1/organization/{orgname}/applications`] endpoint to return a list of all organization applications. 
For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"applications": [{"name": "test", "description": "", "application_uri": "", "client_id": "MCJ61D8KQBFS2DXM56S2", "client_secret": "J5G7CCX5QCA8Q5XZLWGI7USJPSM4M5MQHJED46CF", "redirect_uri": "", "avatar_email": null}, {"name": "new-token", "description": "", "application_uri": "", "client_id": "IG58PX2REEY9O08IZFZE", "client_secret": "2LWTWO89KH26P2CO4TWFM7PGCX4V4SUZES2CIZMR", "redirect_uri": "", "avatar_email": null}, {"name": "second-token", "description": "", "application_uri": "", "client_id": "6XBK7QY7ACSCN5XBM3GS", "client_secret": "AVKBOUXTFO3MXBBK5UJD5QCQRN2FWL3O0XPZZT78", "redirect_uri": "", "avatar_email": null}, {"name": "new-application", "description": "", "application_uri": "", "client_id": "E6GJSHOZMFBVNHTHNB53", "client_secret": "SANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI", "redirect_uri": "", "avatar_email": null}]} +---- ++ +Applications can also be returned for a specific client using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationapplication[`GET /api/v1/organization/{orgname}/applications/{client_id}`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test", "description": "", "application_uri": "", "client_id": "MCJ61D8KQBFS2DXM56S2", "client_secret": "J5G7CCX5QCA8Q5XZLWGI7USJPSM4M5MQHJED46CF", "redirect_uri": "", "avatar_email": null} +---- + +. 
After creation, organization applications can be updated, for example, if you want to add a redirect URI or a new description, using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationapplication[`PUT /api/v1/organization/{orgname}/applications/{client_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/organization/test/applications/12345" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Updated Application Name", + "redirect_uri": "https://example.com/oauth/callback", + "application_uri": "https://example.com", + "description": "Updated description for the application", + "avatar_email": "avatar@example.com" + }' +---- + +. After creation, application information can be returned by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getapplicationinformation[`GET /api/v1/app/{client_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/app/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "new-application3", "description": "", "uri": "", "avatar": {"name": "new-application3", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "app"}, "organization": {"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "user"}, "is_admin": true, "is_member": true, "teams": {}, "ordered_teams": [], "invoice_email": true, "invoice_email_address": "billing@test-org.com", "tag_expiration_s": 1209600, "is_free_account": true, "quotas": [{"id": 2, "limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 
10737418240, "running_backfill": "complete", "backfill_status": "complete"}}} +---- + +. Organization applications can be deleted with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationapplication[`DELETE /api/v1/organization/{orgname}/applications/{client_id}`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization/{orgname}/applications/{client_id}" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/org-create-api.adoc b/modules/org-create-api.adoc new file mode 100644 index 000000000..822aa47fc --- /dev/null +++ b/modules/org-create-api.adoc @@ -0,0 +1,53 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-create-api"] += Creating an organization by using the {productname} API + +Use the following procedure to create a new organization using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to create a new organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganization[`POST /api/v1/organization/`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "name": "" + }' "https:///api/v1/organization/" +---- ++ +Example output ++ +[source,terminal] +---- +"Created" +---- + +. 
After creation, organization details can be changed, such as adding an email address, with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationdetails[`PUT /api/v1/organization/{orgname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "email": "", + "invoice_email": , + "invoice_email_address": "" + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "user"}, "is_admin": true, "is_member": true, "teams": {"owners": {"name": "owners", "description": "", "role": "admin", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}, "can_view": true, "repo_count": 0, "member_count": 1, "is_synced": false}}, "ordered_teams": ["owners"], "invoice_email": true, "invoice_email_address": "billing@test-org.com", "tag_expiration_s": 1209600, "is_free_account": true, "quotas": [{"id": 2, "limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}} +---- \ No newline at end of file diff --git a/modules/org-create.adoc b/modules/org-create.adoc index bf8d6641d..bbf07e298 100644 --- a/modules/org-create.adoc +++ b/modules/org-create.adoc @@ -1,16 +1,26 @@ -[[org-create]] -= Creating organization accounts - -Any user can create their own organization to share repositories of -container images. To create a new organization: - -. 
While logged in as any user, select the plus sign (+) from the upper -right corner of the home page and choose New Organization. -. Type the name of the organization. The name must be alphanumeric, all -lower case, and between 2 and 255 characters long -. Select Create Organization. The new organization appears, ready for you -to begin adding repositories, teams, robot accounts and other features -from icons on the left column. The following figure shows an example of the new -organization’s page with the settings tab selected. -+ -image:new-org.png[Create new repos and teams from an Organization page] \ No newline at end of file +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="org-create"] += Creating an organization by using the UI + +Use the following procedure to create a new organization by using the UI. + +.Procedure + +. Log in to your {productname} registry. + +. Click *Organization* in the navigation pane. + +. Click *Create Organization*. + +. Enter an *Organization Name*, for example, `testorg`. + +. Enter an *Organization Email*. + +. Click *Create*. + +Now, your example organization should populate under the *Organizations* page. \ No newline at end of file diff --git a/modules/org-delete-api.adoc b/modules/org-delete-api.adoc new file mode 100644 index 000000000..1f0c6eeb5 --- /dev/null +++ b/modules/org-delete-api.adoc @@ -0,0 +1,41 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-delete-api"] += Deleting an organization by using the {productname} API + +Use the following procedure to delete an organization using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. 
+* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to delete an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteadminedorganization[`DELETE /api/v1/organization/{orgname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/organization/" +---- + +. The CLI does not return information when deleting an organization from the CLI. To confirm deletion, you can check the {productname} UI, or you can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganization[`GET /api/v1/organization/{orgname}`] command to see if details are returned for the deleted organization: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- ++ +Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http:///api/v1/error/not_found", "status": 404} +---- \ No newline at end of file diff --git a/modules/org-delete.adoc b/modules/org-delete.adoc new file mode 100644 index 000000000..9867a5513 --- /dev/null +++ b/modules/org-delete.adoc @@ -0,0 +1,34 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="org-delete"] += Deleting an organization by using the UI + +Use the following procedure to delete an organization using the v2 UI. + +.Procedure + +. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`. + +. Click the *More Actions* drop down menu. + +. Click *Delete*. ++ +[NOTE] +==== +On the *Delete* page, there is a *Search* input box. 
With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm said organization is marked for deletion. +==== + +. Confirm that you want to permanently delete the organization by typing *confirm* in the box. + +. Click *Delete*. ++ +After deletion, you are returned to the *Organizations* page. ++ +[NOTE] +==== +You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*. +==== \ No newline at end of file diff --git a/modules/org-proxy-cache-configuration-api.adoc b/modules/org-proxy-cache-configuration-api.adoc new file mode 100644 index 000000000..131b0c746 --- /dev/null +++ b/modules/org-proxy-cache-configuration-api.adoc @@ -0,0 +1,66 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-proxy-cache-configuration-api"] += Configuring a proxy cache for an organization by using the {productname} API + +Proxy caching for an organization can be configured by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createproxycacheconfig[`POST /api/v1/organization/{orgname}/proxycache`] endpoint to create a proxy cache configuration for the organization. ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//proxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#validateproxycacheconfig[`POST /api/v1/organization/{orgname}/validateproxycache`] endpoint to validate the proxy configuration: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization/{orgname}/validateproxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getproxycacheconfig[`GET /api/v1/organization/{orgname}/proxycache`] endpoint to obtain information about the proxcy cache. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"upstream_registry": "quay.io", "expiration_s": 86400, "insecure": false} +---- + + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteproxycacheconfig[`DELETE /api/v1/organization/{orgname}/proxycache`] endpoint to ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +"Deleted" +---- \ No newline at end of file diff --git a/modules/org-team-member-api.adoc b/modules/org-team-member-api.adoc new file mode 100644 index 000000000..143e9e8af --- /dev/null +++ b/modules/org-team-member-api.adoc @@ -0,0 +1,66 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-member-info-api"] += Retrieving organization member information by using the API + +Information about organization members can be retrieved by using the {productname} API. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationmembers[`GET /api/v1/organization/{orgname}/members`] to return a list of organization members: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"members": [{"name": "quayadmin", "kind": "user", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "teams": [{"name": "owners", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}}], "repositories": ["testrepo"]}, {"name": "testuser", "kind": "user", "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "teams": [{"name": "owners", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}}], "repositories": []}]} +---- + +. You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationcollaborators[`GET /api/v1/organization/{orgname}/collaborators`] to return a list of organization collaborators: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/collaborators" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"collaborators": [user-test]} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationmember[`GET /api/v1/organization/{orgname}/members/{membername}`] endpoint to obtain more specific information about a user: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "quayadmin", "kind": "user", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "teams": [{"name": "owners", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}}], "repositories": ["testrepo"]} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#removeorganizationmember[`DELETE /api/v1/organization/{orgname}/members/{membername}`] endpoint to delete a team member. ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/organization-management-api.adoc b/modules/organization-management-api.adoc new file mode 100644 index 000000000..333984b4b --- /dev/null +++ b/modules/organization-management-api.adoc @@ -0,0 +1,4 @@ +[id="organization-management-api"] += Establishing quota with the {productname} API + +Organizations can be created and managed through API endpoints. With the {productname} API, you can create organizations, view organization information, create proxy caches for an organization, edit users with access to the organization, change organization details, delete organizations, and more. 
\ No newline at end of file diff --git a/modules/organization-settings-v2-ui.adoc b/modules/organization-settings-v2-ui.adoc new file mode 100644 index 000000000..35798b493 --- /dev/null +++ b/modules/organization-settings-v2-ui.adoc @@ -0,0 +1,40 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="organization-settings-v2-ui"] += Organization settings + +With +ifeval::["{context}" == "quay-io"] += {quayio}, +endif::[] +ifeval::["{context}" == "use-quay"] += {productname}, +endif::[] +some basic organization settings can be adjusted by using the UI. This includes adjusting general settings, such as the e-mail address associated with the organization, and _time machine_ settings, which allows administrators to adjust when a tag is garbage collected after it is permanently deleted. + +Use the following procedure to alter your organization settings by using the v2 UI. + +.Procedure + +. On the v2 UI, click *Organizations*. + +. Click the name of the organization that you will create the robot account for, for example, `test-org`. + +. Click the *Settings* tab. + +. Optional. Enter the email address associated with the organization. + +. Optional. Set the allotted time for the *Time Machine* feature to one of the following: ++ +* *A few seconds* +* *A day* +* *7 days* +* *14 days* +* *A month* + +. Click *Save*. 
\ No newline at end of file diff --git a/modules/organizations-overview.adoc b/modules/organizations-overview.adoc new file mode 100644 index 000000000..8b46a0948 --- /dev/null +++ b/modules/organizations-overview.adoc @@ -0,0 +1,44 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="organizations-overview"] +ifeval::["{context}" == "quay-io"] += {quayio} organizations overview +endif::[] +ifeval::["{context}" == "use-quay"] += {productname} organizations overview +endif::[] + +In +ifeval::["{context}" == "quay-io"] += {quayio} +endif::[] +ifeval::["{context}" == "use-quay"] += {productname} +endif::[] +an organization is a grouping of users, repositories, and teams. It provides a means to organize and manage access control and permissions within the registry. With organizations, administrators can assign roles and permissions to users and teams. Other useful information about organizations includes the following: + +* You cannot have an organization embedded within another organization. To subdivide an +organization, you use teams. + +* Organizations cannot contain users directly. You must first add a team, and then add one or more users to each team. ++ +[NOTE] +==== +Individual users can be added to specific repositories inside of an organization. Consequently, those users are not members of any team on the *Repository Settings* page. The *Collaborators View* on the *Teams and Memberships* page shows users who have direct access to specific repositories within the organization without needing to be part of that organization specifically. +==== + +* Teams can be set up in organizations as just members who use the repositories and +associated images, or as administrators with special privileges for managing +the Organization. + +ifeval::["{context}" == "quay-io"] +Users can create their own organization to share repositories of container images. 
This can be done through the {quayio} UI. +endif::[] +ifeval::["{context}" == "use-quay"] +Users can create their own organization to share repositories of container images. This can be done through the {productname} UI, or by the {productname} API if you have an OAuth token. +endif::[] + diff --git a/modules/other-oci-artifacts-with-quay.adoc b/modules/other-oci-artifacts-with-quay.adoc index 7fe7af55e..5b4cec0a8 100644 --- a/modules/other-oci-artifacts-with-quay.adoc +++ b/modules/other-oci-artifacts-with-quay.adoc @@ -1,9 +1,39 @@ -[[other-oci-artifacts-with-quay]] -= Adding other OCI media types to Quay +// Document included in the following assemblies: -Helm, cosign, and ztsd compression scheme artifacts are built into {productname} 3.6 by default. For any other OCI media type that is not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's config.yaml using the following format: +// Configuring Red hat Quay -.... +:_content-type: REFERENCE +[id="other-oci-artifacts-with-quay"] += Open Container Initiative configuration fields + +.Additional OCI artifact configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field |Type |Description +|**FEATURE_REFERRERS_API** |Boolean| Enables OCI 1.1's referrers API. +|=== + +.Example OCI referrers enablement YAML +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: True +# ... +---- + + +//// +[id="configuring-oci-artifact-types"] +== Configuring additional artifact types + +Other OCI artifact types that are not supported by default can be added to your {productname} deployment by using the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field. + +Use the following reference to add additional OCI artifact types: + +.OCI artifact types configuration +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true ALLOWED_OCI_ARTIFACT_TYPES: : - @@ -12,13 +42,13 @@ ALLOWED_OCI_ARTIFACT_TYPES: : - - -... -.... 
+---- -For example, you can add Singularity (SIF) support by adding the following to your config.yaml: +For example, you can add Singularity (SIF) support by adding the following to your `config.yaml` file: -.... -... +.Example OCI artifact type configuration +[source,yaml] +---- ALLOWED_OCI_ARTIFACT_TYPES: application/vnd.oci.image.config.v1+json: - application/vnd.dev.cosign.simplesigning.v1+json @@ -26,10 +56,9 @@ ALLOWED_OCI_ARTIFACT_TYPES: - application/tar+gzip application/vnd.sylabs.sif.config.v1+json: - application/vnd.sylabs.sif.layer.v1+tar -... -.... - +---- [NOTE] ==== -When adding OCI media types that are not configured by default, users will also need to manually add support for cosign and Helm if desired. The ztsd compression scheme is supported by default, so users will not need to add that OCI media type to their config.yaml to enable support. +When adding OCI artifact types that are not configured by default, {productname} administrators will also need to manually add support for cosign and Helm if desired. ==== +//// \ No newline at end of file diff --git a/modules/poc-creating-dual-stack-cn.adoc b/modules/poc-creating-dual-stack-cn.adoc new file mode 100644 index 000000000..c8820f834 --- /dev/null +++ b/modules/poc-creating-dual-stack-cn.adoc @@ -0,0 +1,16 @@ +:_content-type: PROCEDURE +[id="poc-creating-dual-stack-cn"] += Creating a dual-stack container network + +Use the following procedure to create a new container network that is dual-stack. + +.Procedure + +* Unless there is a requirement that the _default_ container network use both IPv4 and IPv6, it is suggested that a _new_ container network is created that is dual-stack. As a root user, create a new container network that is dual-stack by running the following command: ++ +[source,terminal] +---- +# podman network create ip-dual-stack --ipv6 +---- ++ +With this command, new containers use this network are a natively dual-stack. 
\ No newline at end of file diff --git a/modules/prepare-ocp-for-bare-metal-builds.adoc b/modules/prepare-ocp-for-bare-metal-builds.adoc new file mode 100644 index 000000000..979cf81b1 --- /dev/null +++ b/modules/prepare-ocp-for-bare-metal-builds.adoc @@ -0,0 +1,226 @@ +:_content-type: PROCEDURE +[id="prepare-ocp-for-bare-metal-builds"] += Configuring bare metal builds for {productname-ocp} + +Use the following procedure to configure _bare metal builds_ for {productname-ocp}. + +[NOTE] +==== +If you are using the {productname} Operator on {ocp} with a managed `route` component in your `QuayRegistry` CRD, see "{productname-ocp} _builds_ limitations with self-managed _routes_". +==== + +.Prerequisites + +* You have an {ocp} cluster provisioned with the {productname} Operator running. +* You have set the `tls` component to `unmanaged` and uploaded custom SSL/TLS certificates to the {productname} Operator. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}]. +* You are logged into {ocp} as a cluster administrator. + +.Procedure + +. Enter the following command to create a project where Builds will be run, for example, `bare-metal-builder`: ++ +[source,terminal] +---- +$ oc new-project bare-metal-builder +---- + +. Create a new `ServiceAccount` in the the `bare-metal-builder` namespace by entering the following command: ++ +[source,terminal] +---- +$ oc create sa -n bare-metal-builder quay-builder +---- + +. Enter the following command to grant a user the `edit` role within the `bare-metal-builder` namespace: ++ +[source,terminal] +---- +$ oc policy add-role-to-user -n bare-metal-builder edit system:serviceaccount:bare-metal-builder:quay-builder +---- + +. Enter the following command to retrieve a token associated with the `quay-builder` service account in the `bare-metal-builder` namespace. 
This token is used to authenticate and interact with the {ocp} cluster's API server. + +.. If your {ocp} cluster is version 4.11+, enter the following command: ++ +[source,terminal] +---- +oc create token quay-builder -n bare-metal-builder --duration 24h +---- + +.. If your {ocp} cluster is earlier than version 4.11, for example, version 4.10, enter the following command: ++ +[source,terminal] +---- +$ oc sa get-token -n bare-metal-builder quay-builder +---- + +. Identify the URL for the {ocp} cluster's API server. This can be found in the {ocp} web console. + +. Identify a worker node label to be used when scheduling _build jobs_. Because _build pods_ must run on bare metal worker nodes, typically these are identified with specific labels. ++ +Check with your cluster administrator to determine exactly which node label should be used. + +. Obtain the Kube API Server's certificate authority (CA) to add to {productname}'s extra certificates. + +.. On {ocp} versions 4.15+, enter the following commands to obtain the name of the secret containing the CA: ++ +[source,terminal] +---- +$ oc extract cm/kube-root-ca.crt -n openshift-apiserver +---- ++ +[source,terminal] +---- +$ mv ca.crt build_cluster.crt +---- + +.. On {ocp} versions earlier than 4.15, for example, 4.14, enter the following command: ++ +[source,terminal] +---- +$ oc get sa openshift-apiserver-sa --namespace=openshift-apiserver -o json | jq '.secrets[] | select(.name | contains("openshift-apiserver-sa-token"))'.name +---- + +.. Obtain the `ca.crt` key value from the secret in the {ocp} Web Console. The value begins with *"-----BEGIN CERTIFICATE-----"`*. + +.. Import the CA to {productname}. Ensure that the name of this file matches the `K8S_API_TLS_CA` field used in Step 9. + +. 
Create the following `SecurityContextConstraints` resource for the `ServiceAccount`: ++ +[source,yaml] +---- +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: quay-builder +priority: null +readOnlyRootFilesystem: false +requiredDropCapabilities: null +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: +- '*' +supplementalGroups: + type: RunAsAny +volumes: +- '*' +allowHostDirVolumePlugin: true +allowHostIPC: true +allowHostNetwork: true +allowHostPID: true +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: +- '*' +allowedUnsafeSysctls: +- '*' +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: quay-builder-scc + namespace: bare-metal-builder +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - quay-builder + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: quay-builder-scc + namespace: bare-metal-builder +subjects: +- kind: ServiceAccount + name: quay-builder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: quay-builder-scc +---- + +. Update the `config.yaml` file of your {productname-ocp} deployment to include an appropriate _bare metal builds_ configuration by using the {ocp} web console. + +.. Click *Operators* -> *Installed Operators* -> *Red Hat Quay* -> *Quay Registry*. + +.. Click the name of your registry, for example, *example-registry*. + +.. Under *Config Bundle Secret*, click the name of your configuration bundle, for example, *extra-ca-certificate-config-bundle-secret*. + +.. Click *Actions* -> *Edit Secret*. + +.. 
Add the following information to your {productname} `config.yaml` file, replacing each value with information that is relevant to your specific installation: ++ +[source,yaml] +---- +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- +FEATURE_USER_CREATION: false +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_BUILD_SUPPORT: True +BUILDMAN_HOSTNAME: ${BUILDMAN_HOSTNAME}:443 <1> +BUILD_MANAGER: +- ephemeral +- ALLOWED_WORKER_COUNT: 10 + ORCHESTRATOR_PREFIX: buildman/production/ + ORCHESTRATOR: + REDIS_HOST: <2> + REDIS_PASSWORD: "" + REDIS_SSL: false + REDIS_SKIP_KEYSPACE_EVENT_SETUP: false + EXECUTORS: + - EXECUTOR: kubernetes + BUILDER_NAMESPACE: <3> + K8S_API_SERVER: <4> + K8S_API_TLS_CA: <5> + VOLUME_SIZE: 8G + KUBERNETES_DISTRIBUTION: openshift + CONTAINER_MEMORY_LIMITS: 1G <6> + CONTAINER_CPU_LIMITS: 300m <7> + CONTAINER_MEMORY_REQUEST: 1G <8> + CONTAINER_CPU_REQUEST: 300m <9> + NODE_SELECTOR_LABEL_KEY: beta.kubernetes.io/instance-type + NODE_SELECTOR_LABEL_VALUE: n1-standard-4 + CONTAINER_RUNTIME: podman + SERVICE_ACCOUNT_NAME: + SERVICE_ACCOUNT_TOKEN: <10> + QUAY_USERNAME: + QUAY_PASSWORD: + WORKER_IMAGE: /quay-quay-builder + WORKER_TAG: + BUILDER_VM_CONTAINER_IMAGE: registry.redhat.io/quay/quay-builder-qemu-rhcos-rhel8:v3.9.10-4 + SETUP_TIME: 180 + MINIMUM_RETRY_THRESHOLD: 0 + SSH_AUTHORIZED_KEYS: <11> + - + - + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: +---- +<1> Obtained by running the following command: `$ oc get route quayregistry-quay-builder -n ${QUAY_PROJECT} -o jsonpath='{.spec.host}'`. +<2> The hostname for your Redis service. +<3> Set to match the name of your _bare metal builds_ namespace. This example used `bare-metal-builder`. +<4> The `K8S_API_SERVER` is obtained by running `$ oc cluster-info`. +<5> You must manually create and add your custom CA cert, for example, `K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build-cluster.crt`. +<6> Defaults to `5120Mi` if left unspecified. +<7> Defaults to `1000m` if left unspecified. 
+<8> Defaults to `3968Mi` if left unspecified. +<9> Defaults to `500m` if left unspecified. +<10> Obtained when running `$ oc create sa`. +<11> Allows public SSH keys to be added to the build environment for remote troubleshooting access. This key, or keys, should correspond to the private key that an admin or developer will use to SSH into the build worker for debugging purposes. This key can be obtained by establishing an SSH connection to the remote host using a specific SSH key and port. For example: `$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost`. + +. Restart your {productname} registry to enable the _builds_ feature. \ No newline at end of file diff --git a/modules/preparing-system-deploy-quay.adoc b/modules/preparing-system-deploy-quay.adoc new file mode 100644 index 000000000..f8e812b6f --- /dev/null +++ b/modules/preparing-system-deploy-quay.adoc @@ -0,0 +1,6 @@ + +:_content-type: CONCEPT +[id="preparing-system-deploy-quay"] += Preparing your system to deploy {productname} + +For a proof of concept {productname} deployment, you must configure port mapping, a database, and Redis prior to deploying the registry. Use the following procedures to prepare your system to deploy {productname}. \ No newline at end of file diff --git a/modules/proc_configure-user-settings.adoc b/modules/proc_configure-user-settings.adoc new file mode 100644 index 000000000..12cc46d45 --- /dev/null +++ b/modules/proc_configure-user-settings.adoc @@ -0,0 +1,58 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="use-quay-manage-settings"] += User settings + +The *User Settings* page provides users a way to set their email address, password, account type, set up desktop notifications, select an avatar, delete an account, adjust the _time machine_ setting, and view billing information. 
+ +[id="navigating-user-settings-page"] +== Navigating to the User Settings page + +Use the following procedure to navigate to the *User Settings* page. + +.Procedure + +. On {quayio}, click your username in the header. + +. Select *Account Settings*. You are redirected to the *User Settings* page. + +[id="adjusting-user-settings"] +== Adjusting user settings + +Use the following procedure to adjust user settings. + +.Procedure + +* To change your email address, select the current email address for *Email Address*. In the pop-up window, enter a new email address, then, click *Change Email*. A verification email will be sent before the change is applied. + +* To change your password, click *Change password*. Enter the new password in both boxes, then click *Change Password*. + +* Change the account type by clicking *Individual Account*, or the option next to *Account Type*. In some cases, you might have to leave an organization prior to changing the account type. + +* Adjust your desktop notifications by clicking the option next to *Desktop Notifications*. Users can either enable, or disable, this feature. + +* You can delete an account by clicking *Begin deletion*. You cannot delete an account if you have an active plan, or if you are a member of an organization where you are the only administrator. You must confirm deletion by entering the namespace. ++ +[IMPORTANT] +==== +Deleting an account is not reversible and will delete all of the account's data including repositories, created build triggers, and notifications. +==== + +* You can set the _time machine_ feature by clicking the drop-box next to *Time Machine*. This feature dictates the amount of time after a tag is deleted that the tag is accessible in time machine before being garbage collected. After selecting a time, click *Save Expiration Time*. + +[id="billing-information"] +== Billing information + +You can view billing information on the *User Settings*. 
In this section, the following information is available: + +* *Current Plan*. This section denotes the current plan {quayio} plan that you are signed up for. It also shows the amount of private repositories you have. + +* *Invoices*. If you are on a paid plan, you can click *View Invoices* to view a list of invoices. + +* *Receipts*. If you are on a paid plan, you can select whether to have receipts for payment emailed to you, another user, or to opt out of receipts altogether. + diff --git a/modules/proc_container-security-operator-setup.adoc b/modules/proc_container-security-operator-setup.adoc index 4af70919f..e52a5b4cd 100644 --- a/modules/proc_container-security-operator-setup.adoc +++ b/modules/proc_container-security-operator-setup.adoc @@ -9,95 +9,114 @@ The link:https://operatorhub.io/operator/container-security-operator[Container S The CSO does not work without {productname} and Clair. ==== -The Container Security Operator (CSO) performs the following features: +The Container Security Operator (CSO) includes the following features: * Watches containers associated with pods on either specified or all namespaces. -* Queries the container registry where the containers came from for vulnerability information (provided that an image's registry supports image scanning, such a a {productname} registry with Clair scanning). +* Queries the container registry where the containers came from for vulnerability information, provided that an image's registry supports image scanning, such a a {productname} registry with Clair scanning. -* Exposes vulnerabilities via the `ImageManifestVuln` object in the Kubernetes API. +* Exposes vulnerabilities through the `ImageManifestVuln` object in the Kubernetes API. [NOTE] ==== To see instructions on installing the CSO on Kubernetes, -select the Install button from the link:https://operatorhub.io/operator/container-security-operator[Container Security OperatorHub.io] page. 
+select the *Install* button from the link:https://operatorhub.io/operator/container-security-operator[Container Security OperatorHub.io] page. ==== [id="running-cso-openshift"] == Downloading and running the Container Security Operator in {ocp} -Use the following procedure to download the Container Security Operator. +Use the following procedure to download the Container Security Operator (CSO). [NOTE] ==== In the following procedure, the CSO is installed in the `marketplace-operators` namespace. This allows the CSO to be used in all namespaces of your {ocp} cluster. ==== -. Go to Operators -> OperatorHub (select Security) to see the available `Container Security` Operator. +.Procedure -. Select the `Container Security` Operator, then select `Install` -to go to the Create Operator Subscription page. +. On the {ocp} console page, select *Operators* -> *OperatorHub* and search for *Container Security Operator*. + +. Select the Container Security Operator, then select *Install* to go to the *Create Operator Subscription* page. . Check the settings (all namespaces and automatic approval strategy, by default), and select -`Subscribe`. The `Container Security` appears after a few moments on the `Installed Operators` screen. +*Subscribe*. The *Container Security* appears after a few moments on the *Installed Operators* screen. -. Optionally, you can add custom certificates to the CSO. In this example, create a certificate -named quay.crt in the current directory. Then run the following command to add the cert to the CSO (restart -the Operator pod for the new certs to take effect): +. Optional: you can add custom certificates to the CSO. In this example, create a certificate +named `quay.crt` in the current directory. 
Then, run the following command to add the certificate to the CSO: + -``` +[source,terminal] +---- $ oc create secret generic container-security-operator-extra-certs --from-file=quay.crt -n openshift-operators -``` +---- ++ +[NOTE] +==== +You must restart the Operator pod for the new certificates to take effect. +==== -. Open the OpenShift Dashboard (Home -> Dashboards). A link to -Image Security appears under the status section, with a listing of the number -of vulnerabilities found so far. Select the link to see a Security breakdown, as shown in the following figure: +. Navigate to *Home* -> *Overview*. A link to *Image Vulnerabilities* appears under the status section, with a listing of the number of vulnerabilities found so far. Select the link to see a security breakdown, as shown in the following image: ++ +image:cso-dashboard.png[Access CSO scanning data from the {ocp} dashboard] + -image:cso-dashboard.png[Access SCO scanning data from OpenShift dashboard] +[IMPORTANT] +==== +The Container Security Operator currently provides broken links for Red Hat Security advisories. For example, the following link might be provided: `https://access.redhat.com/errata/RHSA-2023:1842%20https://access.redhat.com/security/cve/CVE-2023-23916`. The `%20` in the URL represents a space character, however it currently results in the combination of the two URLs into one incomplete URL, for example, `https://access.redhat.com/errata/RHSA-2023:1842` and `https://access.redhat.com/security/cve/CVE-2023-23916`. As a temporary workaround, you can copy each URL into your browser to navigate to the proper page. This is a known issue and will be fixed in a future version of {productname}. +==== . You can do one of two things at this point to follow up on any detected vulnerabilities: -+ -* Select the link to the vulnerability. You are taken to the container registry, {productname} or other registry -where the container came from, where you can see information about the vulnerability. 
The following -figure shows an example of detected vulnerabilities from a Quay.io registry: + +.. Select the link to the vulnerability. You are taken to the container registry, {productname} or other registry where the container came from, where you can see information about the vulnerability. The following figure shows an example of detected vulnerabilities from a Quay.io registry: + image:cso-registry-vulnerable.png[The CSO points you to a registry containing the vulnerable image] + -* Select the namespaces link to go to the ImageManifestVuln screen, -where you can see the name of the selected image -and all namespaces where that image is running. -The following figure indicates that a particular vulnerable image -is running in two namespaces: +.. Select the namespaces link to go to the *Image Manifest Vulnerabilities* page, where you can see the name of the selected image and all namespaces where that image is running. The following figure indicates that a particular vulnerable image is running in two namespaces: + image:cso-namespace-vulnerable.png[View namespaces a vulnerable image is running in] -At this point, you know what images are vulnerable, what -you need to do to fix those vulnerabilities, -and every namespace that the image was run in. So you can: +After executing this procedure, you are made aware of what images are vulnerable, what you must do to fix those vulnerabilities, and every namespace that the image was run in. Knowing this, you can perform the following actions: + +* Alert users who are running the image that they need to correct the vulnerability. +* Stop the images from running by deleting the deployment or the object that started the pod that the image is in. ++ +[NOTE] +==== +If you delete the pod, it might take a few minutes for the vulnerability to reset on the dashboard. 
+==== -* Alert anyone running the image that -they need to correct the vulnerability -* Stop the images from running (by deleting the deployment -or other object that started the pod the image is in) +[id="query-image-vulnerabilities-from-cli"] +== Querying image vulnerabilities from the CLI -Note that if you do delete the pod, it may take a few minutes -for the vulnerability to reset on the dashboard. +Use the following procedure to query image vulnerabilities from the command line interface (CLI). -== Query image vulnerabilities from the CLI -You can query information on security from the command line. To query for detected vulnerabilities, type: +.Procedure -``` +. Enter the following command to query for detected vulnerabilities: ++ +[source,terminal] +---- $ oc get vuln --all-namespaces +---- ++ +.Example output ++ +[source,terminal] +---- NAMESPACE NAME AGE default sha256.ca90... 6m56s skynet sha256.ca90... 9m37s -``` -To display details for a particular vulnerability, identify one of the -vulnerabilities, along with its namespace and the `describe` option. -This example shows an active container whose image includes an RPM package with a vulnerability: +---- -``` -$ oc describe vuln --namespace mynamespace sha256.ac50e3752... +. Optional. To display details for a particular vulnerability, identify a specific vulnerability and its namespace, and use the `oc describe` command. The following example shows an active container whose image includes an RPM package with a vulnerability: ++ +[source,terminal] +---- +$ oc describe vuln --namespace mynamespace sha256.ac50e3752... +---- +.Example output ++ +[source,terminal] +---- Name: sha256.ac50e3752... Namespace: quay-enterprise ... @@ -109,4 +128,31 @@ Spec: Versionformat: rpm Vulnerabilities: Description: Network Security Services (NSS) is a set of libraries...
-``` +---- + +[id="uninstalling-container-security-operator"] +== Uninstalling the Container Security Operator + +To uninstall the Container Security Operator from your {ocp} deployment, you must uninstall the Operator and delete the `imagemanifestvulns.secscan.quay.redhat.com` custom resource definition (CRD). Without removing the CRD, image vulnerabilities are still reported on the {ocp} *Overview* page. + +.Procedure + +. On the {ocp} web console, click *Operators* -> *Installed Operators*. + +. Click the menu kebab of the Container Security Operator. + +. Click *Uninstall Operator*. Confirm your decision by clicking *Uninstall* in the popup window. + +. Remove the `imagemanifestvulns.secscan.quay.redhat.com` custom resource definition by entering the following command: ++ +[source,terminal] +---- +$ oc delete customresourcedefinition imagemanifestvulns.secscan.quay.redhat.com +---- ++ +.Example output ++ +[source,terminal] +---- +customresourcedefinition.apiextensions.k8s.io "imagemanifestvulns.secscan.quay.redhat.com" deleted +---- \ No newline at end of file diff --git a/modules/proc_creating-ocp-secret-for-oauth-token.adoc b/modules/proc_creating-ocp-secret-for-oauth-token.adoc index a28a107b7..a5c8e2b8b 100644 --- a/modules/proc_creating-ocp-secret-for-oauth-token.adoc +++ b/modules/proc_creating-ocp-secret-for-oauth-token.adoc @@ -1,5 +1,5 @@ :_content-type: PROCEDURE -[[creating-ocp-secret-for-oauth-token]] +[id="creating-ocp-secret-for-oauth-token"] = Creating an {ocp} secret for the OAuth token In this procedure, you will add the previously obtained access token to communicate with your {productname} deployment. The access token will be stored within {ocp} as a secret. 
diff --git a/modules/proc_creating-quay-integration-cr.adoc b/modules/proc_creating-quay-integration-cr.adoc index 3bf2105e5..bec445e36 100644 --- a/modules/proc_creating-quay-integration-cr.adoc +++ b/modules/proc_creating-quay-integration-cr.adoc @@ -1,6 +1,6 @@ :_content-type: PROCEDURE -[[creating-quay-integration-cr]] -== Creating the QuayIntegration custom resource +[id="creating-quay-integration-cr"] += Creating the QuayIntegration custom resource In this procedure, you will create a `QuayIntegration` custom resource, which can be completed from either the web console or from the command line. @@ -11,7 +11,8 @@ In this procedure, you will create a `QuayIntegration` custom resource, which ca * An {ocp} 4.6 or greater environment for which you have cluster administrator permissions. * Optional: You have installed the OpenShift CLI (oc). -=== Optional: Creating the QuayIntegration custom resource using the CLI +[id="creating-quay-integration-custom-resource-cli"] +== Optional: Creating the QuayIntegration custom resource using the CLI Follow this procedure to create the `QuayIntegration` custom resource using the command line. @@ -43,7 +44,7 @@ $ touch quay-integration.yaml <2> The `credentialsSecret` property refers to the namespace and name of the secret containing the token that was previously created. <3> Replace the `QUAY_URL` with the hostname of your {productname} instance. <4> If {productname} is using self signed certificates, set the property to `insecureRegistry: true`. -+ + For a list of all configuration fields, see "QuayIntegration configuration fields". . 
Create the `QuayIntegration` custom resource: @@ -52,7 +53,8 @@ For a list of all configuration fields, see "QuayIntegration configuration field $ oc create -f quay-integration.yaml ---- -=== Optional: Creating the QuayIntegration custom resource using the web console +[id="creating-quay-integration-custom-resource-web-console"] +== Optional: Creating the QuayIntegration custom resource using the web console Follow this procedure to create the `QuayIntegration` custom resource using the web console. @@ -70,7 +72,7 @@ Follow this procedure to create the `QuayIntegration` custom resource using the * *Cluster ID*: The ID associated with this cluster. This value should be unique across the entire ecosystem. Defaults to `openshift` if left unspecified. * *Credentials secret*: Refers to the namespace and name of the secret containing the token that was previously created. * *Quay hostname*: The hostname of the Quay registry. -+ -For a list of all configuration fields, see "QuayIntegration configuration fields". + +For a list of all configuration fields, see "link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#quay-integration-config-fields[QuayIntegration configuration fields]". After the `QuayIntegration` custom resource is created, your {ocp} cluster will be linked to your {productname} instance. Organizations within your {productname} registry should be created for the related namespace for the {ocp} environment. diff --git a/modules/proc_deploy_quay_add.adoc b/modules/proc_deploy_quay_add.adoc index a11eaaf79..ddbedb6ff 100644 --- a/modules/proc_deploy_quay_add.adoc +++ b/modules/proc_deploy_quay_add.adoc @@ -53,14 +53,14 @@ the startup process. 
==== + [subs="verbatim,attributes"] -``` +---- # sudo podman run --restart=always -p 443:8443 -p 80:8080 \ --sysctl net.core.somaxconn=4096 \ --privileged=true \ -v /mnt/quay/config:/conf/stack:Z \ -v /mnt/quay/storage:/datastorage:Z \ -d {productrepo}/{quayimage}:{productminv} -``` +---- . **Open browser to UI**: Once the `Quay` container has started, go to your web browser and open the URL, to the node running the `Quay` container. @@ -105,12 +105,12 @@ that is currently stored in `/root/ca.crt`. If not, then remove the line that ad `/root/ca.crt` to the container: + [subs="verbatim,attributes"] -``` +---- $ sudo podman run -d --name mirroring-worker \ -v /mnt/quay/config:/conf/stack:Z \ -v /root/ca.crt:/etc/pki/ca-trust/source/anchors/ca.crt \ {productrepo}/{quayimage}:{productminv} repomirror -``` +---- . **Log into config tool**: Log into the {productname} Setup Web UI (config tool). . **Enable repository mirroring**: Scroll down the Repository Mirroring section and select the Enable Repository Mirroring check box, as shown here: diff --git a/modules/proc_deploy_quay_ha_ceph.adoc b/modules/proc_deploy_quay_ha_ceph.adoc index 331f340e6..6921e85d4 100644 --- a/modules/proc_deploy_quay_ha_ceph.adoc +++ b/modules/proc_deploy_quay_ha_ceph.adoc @@ -6,22 +6,22 @@ several other supporting nodes, as follows: * ceph04 - Ceph RGW node * ceph05 - Ceph Ansible administration node -For details on installing Ceph nodes, see link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux[Installing Red Hat Ceph Storage on Red Hat Enterprise Linux]. +For details on installing Ceph nodes, see link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux[Installing Red Hat Ceph Storage on Red Hat Enterprise Linux]. -Once you have set up the Ceph storage cluster, create a Ceph Object Gateway (also referred to as a RADOS gateway). 
See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. +Once you have set up the Ceph storage cluster, create a Ceph Object Gateway (also referred to as a RADOS gateway). See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. === Install each Ceph node On ceph01, ceph02, ceph03, ceph04, and ceph05, do the following: -. Review prerequisites for setting up Ceph nodes in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#requirements-for-installing-rhcs[Requirements for Installing Red Hat Ceph Storage]. In particular: +. Review prerequisites for setting up Ceph nodes in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#requirements-for-installing-rhcs[Requirements for Installing Red Hat Ceph Storage]. In particular: + -* Decide if you want to use link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#considerations-for-using-a-raid-controller-with-osd-nodes[RAID controllers on OSD nodes]. +* Decide if you want to use link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#considerations-for-using-a-raid-controller-with-osd-nodes[RAID controllers on OSD nodes]. 
+ -* Decide if you want a separate cluster network for your link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#verifying-the-network-configuration-for-red-hat-ceph-storage[Ceph Network Configuration]. +* Decide if you want a separate cluster network for your link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#verifying-the-network-configuration-for-red-hat-ceph-storage[Ceph Network Configuration]. . Prepare OSD storage (ceph01, ceph02, and ceph03 only). Set up the OSD storage on the three OSD nodes (ceph01, ceph02, and ceph03). See OSD Ansible Settings -in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] +in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] for details on supported storage types that you will enter into your Ansible configuration later. For this example, a single, unformatted block device (`/dev/sdb`), that is separate from the operating system, is configured on each @@ -29,7 +29,7 @@ of the OSD nodes. If you are installing on metal, you might want to add an extra . Install Red Hat Enterprise Linux Server edition, as described in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/installation_guide/[RHEL 7 Installation Guide]. -. Register and subscribe each Ceph node as described in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/#registering-red-hat-ceph-storage-nodes-to-cdn-and-attaching-subscriptions[Registering Red Hat Ceph Storage Nodes]. 
Here is how to subscribe to the necessary repos: +. Register and subscribe each Ceph node as described in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/#registering-red-hat-ceph-storage-nodes-to-cdn-and-attaching-subscriptions[Registering Red Hat Ceph Storage Nodes]. Here is how to subscribe to the necessary repos: + ``` @@ -102,7 +102,7 @@ systems: # cp site.yml.sample site.yml ``` . Edit the copied group_vars/all.yml file. See General Ansible Settings in -link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.1] for details. For example: +link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.1] for details. For example: + ``` ceph_origin: repository @@ -115,7 +115,7 @@ public_network: 192.168.122.0/24 + Note that your network device and address range may differ. . Edit the copied `group_vars/osds.yml` file. See the OSD Ansible Settings in -link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] for details. In this example, the second disk device (`/dev/sdb`) on each OSD node is +link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] for details. In this example, the second disk device (`/dev/sdb`) on each OSD node is used for both data and journal storage: + ``` @@ -203,4 +203,4 @@ Hello World! 
=== Install the Ceph Object Gateway On the Ansible system (ceph05), configure a -Ceph Object Gateway to your Ceph Storage cluster (which will ultimately run on ceph04). See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. +Ceph Object Gateway to your Ceph Storage cluster (which will ultimately run on ceph04). See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. diff --git a/modules/proc_deploy_quay_ha_lbdb.adoc b/modules/proc_deploy_quay_ha_lbdb.adoc index be721faff..f968a469c 100644 --- a/modules/proc_deploy_quay_ha_lbdb.adoc +++ b/modules/proc_deploy_quay_ha_lbdb.adoc @@ -1,37 +1,40 @@ -== Set up Load Balancer and Database +:_content-type: PROCEDURE +[id="setting-up-load-balancer-database"] +== Setting up the HAProxy load balancer and the PostgreSQL database -On the first two systems (q01 and q02), install the haproxy load balancer and postgresql database. Haproxy will be configured as the access point and load balancer for the following services running on other systems: +Use the following procedure to set up the HAProxy load balancer and the PostgreSQL database. +.Prerequisites + +* You have installed the Podman or Docker CLI. + +.Procedure + +. On the first two systems, `q01` and `q02`, install the HAProxy load balancer and the PostgreSQL database. 
This configures HAProxy as the access point and load balancer for the following services running on other systems: ++ * {productname} (ports 80 and 443 on B systems) * Redis (port 6379 on B systems) * RADOS (port 7480 on C systems) -Because the services on the two systems run as containers, you will use `podman`, if it is installed. Alternatively, you could use the equivalent `docker` commands. - -[NOTE] -==== -For more information on using `podman` and restarting containers, see the section "Using podman" earlier in this document. -==== - -Here is how to set up the A systems: //. **Install and start docker service**: Install, start, and enable the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7[docker service]. -. **Open ports for haproxy service**: Open all haproxy ports in SELinux and selected haproxy ports in the firewall: - +. Open all HAProxy ports in SELinux and selected HAProxy ports in the firewall: + -``` +[source,terminal] +---- # setsebool -P haproxy_connect_any=on # firewall-cmd --permanent --zone=public --add-port=6379/tcp --add-port=7480/tcp success # firewall-cmd --reload success -``` +---- + //. **Set up link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/load_balancer_administration/index#install_haproxy_example1[haproxy service]**: Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis, and Ceph RADOS services. Here are examples of defaults and added frontend and backend settings: -. **Set up haproxy service**: Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis, and Ceph RADOS services. Here are examples of defaults and added frontend and backend settings: +. 
Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis and Ceph RADOS services. The following are examples of defaults and added frontend and backend settings: + -``` +---- #--------------------------------------------------------------------- # common defaults that all the 'listen' and 'backend' sections will # use if not designated in their block @@ -82,75 +85,138 @@ backend be_rdgw server ceph02 ceph02:7480 check server ceph03 ceph03:7480 check backend be_redis -server quay01 quay01:6380 check inter 1s -server quay02 quay02:6380 check inter 1s -server quay03 quay03:6380 check inter 1s -``` - +server quay01 quay01:6379 check inter 1s +server quay02 quay02:6379 check inter 1s +server quay03 quay03:6379 check inter 1s +---- + -Once the new haproxy.cfg file is in place, restart the haproxy service. +After the new `haproxy.cfg` file is in place, restart the HAProxy service by entering the following command: + -``` +[source,terminal] +---- # systemctl restart haproxy -``` +---- -. **Install / Deploy a Database**: Install, enable and start the link:https://access.redhat.com/containers/?tab=overview#/registry.access.redhat.com/rhel8/postgresql-10)[PostgreSQL] database container. The following commands will: - -+ -* Start the PostgreSQL database with the user, password and database all set. Data from the container will be stored on the host system in the `/var/lib/pgsql/data` directory. -+ -* List available extensions. +. Create a folder for the PostgreSQL database by entering the following command: + -* Create the pg_trgm extension. -+ -* Confirm the extension is installed -+ -``` +[source,terminal] +---- $ mkdir -p /var/lib/pgsql/data +---- + +. Set the following permissions for the `/var/lib/pgsql/data` folder: ++ +[source,terminal] +---- $ chmod 777 /var/lib/pgsql/data +---- + +. 
Enter the following command to start the PostgreSQL database: ++ +[source,terminal] +---- $ sudo podman run -d --name postgresql_database \ -v /var/lib/pgsql/data:/var/lib/pgsql/data:Z \ -e POSTGRESQL_USER=quayuser -e POSTGRESQL_PASSWORD=quaypass \ -e POSTGRESQL_DATABASE=quaydb -p 5432:5432 \ - registry.redhat.io/rhel8/postgresql-10:1 + registry.redhat.io/rhel8/postgresql-13:1-109 +---- ++ +[NOTE] +==== +Data from the container will be stored on the host system in the `/var/lib/pgsql/data` directory. +==== +. List the available extensions by entering the following command: ++ +[source,terminal] +---- $ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_available_extensions" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +---- ++ +.Example output ++ +[source,terminal] +---- name | default_version | installed_version | comment -----------+-----------------+-------------------+---------------------------------------- adminpack | 1.0 | | administrative functions for PostgreSQL ... +---- +. Create the `pg_trgm` extension by entering the following command: ++ +[source,terminal] +---- $ sudo podman exec -it postgresql_database /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | /opt/rh/rh-postgresql96/root/usr/bin/psql -d quaydb' +---- +. Confirm that the `pg_trgm` has been created by entering the following command: ++ +[source,terminal] +---- $ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_extension" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +---- ++ +.Example output ++ +[source,terminal] +---- extname | extowner | extnamespace | extrelocatable | extversion | extconfig | extcondition ---------+----------+--------------+----------------+------------+-----------+-------------- plpgsql | 10 | 11 | f | 1.0 | | pg_trgm | 10 | 2200 | t | 1.3 | | (2 rows) +---- +. 
Alter the privileges of the Postgres user `quayuser` and grant them the `superuser` role to give the user unrestricted access to the database: ++ +[source,terminal] +---- $ sudo podman exec -it postgresql_database /bin/bash -c 'echo "ALTER USER quayuser WITH SUPERUSER;" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +---- ++ +.Example output ++ +[source,terminal] +---- ALTER ROLE +---- -``` - -. **Open the firewall**: If you have a firewalld service active on your system, run the following commands to make the PostgreSQL port available through the firewall: - +. If you have a firewalld service active on your system, run the following commands to make the PostgreSQL port available through the firewall: + -``` +[source,terminal] +---- # firewall-cmd --permanent --zone=trusted --add-port=5432/tcp -success +---- ++ +[source,terminal] +---- # firewall-cmd --reload -success -``` - -. **Test PostgreSQL Connectivity**: Use the `psql` command to test connectivity to the PostgreSQL database. Try this on a remote system as well, to make sure you can access the service remotely: +---- +. Optional. If you do not have the `postgres` CLI package installed, install it by entering the following command: + -``` +[source,terminal] +---- # yum install postgresql -y +---- +. Use the `psql` command to test connectivity to the PostgreSQL database. ++ +[NOTE] +==== +To verify that you can access the service remotely, run the following command on a remote system. +==== ++ +---- # psql -h localhost quaydb quayuser +---- ++ +.Example output ++ +[source,terminal] +---- Password for user test: psql (9.2.23, server 9.6.5) WARNING: psql version 9.2, server version 9.6. @@ -158,4 +224,4 @@ WARNING: psql version 9.2, server version 9.6. Type "help" for help. 
test=> \q -``` +---- diff --git a/modules/proc_deploy_quay_local_ipv6.adoc b/modules/proc_deploy_quay_local_ipv6.adoc new file mode 100644 index 000000000..b376d72a1 --- /dev/null +++ b/modules/proc_deploy_quay_local_ipv6.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="poc-deploy-quay-local-ipv6"] += Configuring the Podman CNI to use IPv6 + +In some cases, you might want to run a local instance of {productname} to use IPv6. This setup is common for development or testing purposes. + +By default, the Podman network for a root user does not use IPv6, and instead defaults to use IPv4. You can configure the Podman Container Network Interface (CNI) to use both IPv4 and IPv6, which allows for a local instance of {productname} using IPv6. + +[discrete] +[id="additional-resources"] +== Additional resources + +* link:https://access.redhat.com/solutions/6196301[How to configure the default podman container network for the root user to use both IPv4 and IPv6]. \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_conf.adoc b/modules/proc_deploy_quay_poc_conf.adoc index b17fa607f..c2e2b93e8 100644 --- a/modules/proc_deploy_quay_poc_conf.adoc +++ b/modules/proc_deploy_quay_poc_conf.adoc @@ -1,9 +1,9 @@ :_content-type: PROCEDURE [id="poc-configuring-quay"] -= Configuring {productname} += Deploying {productname} config tool -Use the following procedure to generate a configuration file that details all components, including registry settings, the database, and Redis connection parameters. +Use the following procedure to deploy the {productname} configuration tool. Afterwards, you can navigate to the registry endpoint and generate a configuration file that details all components, including registry settings, the database, and Redis connection parameters. 
.Procedure @@ -43,41 +43,63 @@ In the {productname} configuration editor, you must enter the following credenti [id="poc-basic-configuration"] === Basic configuration -Under *Basic Configuration*, populate the *Registry Title* and *Registry Title Short* fields. The default values can be used if they are populated. +*Basic configuration* includes the *Registry Title*, *Registry Title Short*, *Enterprise Logo URL*, and *Contact Information* fields. +.Procedure + +[NOTE] +==== +The default values can be used if they are populated. +==== + +. For *Registry Title*, enter *Project Quay*. + +. For *Registry Title Short*, enter *Project Quay*. + +. Optional. Enter a URL for *Enterprise Logo URL*. + +. Optional. Enter contact information, choosing from one of the following options: *URL*, *E-mail*, *IRC*, *Telephone*. + +[id="poc-server-configuration"] === Server configuration -Under *Server Hostname*, specify the HTTP host and port for the location where the registry will be accessible on the network. +*Server configuration* includes the *Server Hostname* and optional *TLS* fields. -If you followed the instructions in this documenter, enter `quay-server.example.com`. +.Procedure + +* For this deployment, enter `quay-server.example.com`. [id="poc-database"] === Database In the *Database* section, specify the connection details for the database that {productname} uses to store metadata. -If you followed the instructions in this document for deploying a proof of concept system, enter the following values: +.Procedure -* **Database Type:** Postgres -* **Database Server:** quay-server.example.com:5432 -* **Username:** quayuser -* **Password:** quaypass -* **Database Name:** quay +. For **Database Type,** enter `Postgres`. +. For **Database Server,** enter `quay-server.example.com:5432`. +. For **Username,** enter `quayuser`. +. For **Password,** enter `quaypass`. +. For **Database Name,** enter `quay`. 
[id="poc-redis"] === Redis The Redis key-value store is used to store real-time events and build logs. -If you followed the instructions in this document for deploying a proof-of-concept system, enter the following credentials under the *Redis* section: +.Procedure -* **Redis Hostname:** quay-server.example.com -* **Redis port:** 6379 (default) -* **Redis password:** strongpassword +. For **Redis Hostname,** enter `quay-server.example.com`. +. For **Redis port,** enter `6379`. This is the default value. +. For **Redis password,** enter `strongpassword`. [id="poc-validating"] == Validate and download configuration -After all required fields have been set, validate your settings by clicking *Validate Configuration Changes*. If any errors are reported, continue editing your configuration until the settings are valid and {productname} can connect to your database and Redis servers. +After all required fields have been set, validate your settings. + +.Procedure +* Click the *Validate Configuration Changes* button. If any errors are reported, continue editing your configuration until the settings are valid and {productname} can connect to your database and Redis servers. ++ After validation, download the *Configuration* file. Stop the `Quay` container that is running the configuration editor. \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_db.adoc b/modules/proc_deploy_quay_poc_db.adoc index 2da302457..34ede21fe 100644 --- a/modules/proc_deploy_quay_poc_db.adoc +++ b/modules/proc_deploy_quay_poc_db.adoc @@ -1,14 +1,10 @@ :_content-type: PROCEDURE [id="poc-configuring-database"] - = Configuring the database -{productname} requires a database for storing metadata. Postgres is used throughout this document and is recommended for highly available configurations. Alternatively, you can use MySQL with a similar approach to configuration as described below. 
- -[id="poc-setting-up-postgres"] -== Setting up Postgres +{productname} requires a database for storing metadata. PostgreSQL is used throughout this document. For this deployment, a directory on the local file system to persist database data is used. -For the {productname} proof of concept, a directory on the local file system to persist database data is used. +Use the following procedure to set up a PostgreSQL database. .Procedure diff --git a/modules/proc_deploy_quay_poc_next.adoc b/modules/proc_deploy_quay_poc_next.adoc index f9344f237..e4b38835c 100644 --- a/modules/proc_deploy_quay_poc_next.adoc +++ b/modules/proc_deploy_quay_poc_next.adoc @@ -1,20 +1,25 @@ -= Next steps - -This document shows how to configure and deploy a proof-of-concept version of {productname}. For more information on deploying to a production environment, see the guide "Deploy {productname} - High Availability". +:_content-type: CONCEPT -The "Use {productname}" guide shows you how to: +[id="poc-next-steps"] += Next steps -* Add users and repositories -* Use tags -* Automatically build Dockerfiles with build workers -* Set up build triggers -* Add notifications for repository events +The following sections might be useful after deploying a proof of concept version of {productname}. Many of these procedures can be used on a proof of concept deployment, offering insights to {productname}'s features. +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/index[Using {productname}]. The content in this guide explains the following concepts: +** Adding users and repositories +** Using image tags +** Building Dockerfiles with build workers +** Setting up build triggers +** Adding notifications for repository events +** and more -The "Manage {productname}" guide shows you how to: +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/index[Managing {productname}]. 
The content in this guide explains the following concepts: -* Use SSL and TLS -* Enable security scanning with Clair -* Use repository mirroring -* Configure LDAP authentication -* Use georeplication of storage +** Using SSL/TLS +** Configuring action log storage +** Configuring Clair security scanner +** Repository mirroring +** IPv6 and dual-stack deployments +** Configuring OIDC for {productname} +** Geo-replication +** and more \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_redis.adoc b/modules/proc_deploy_quay_poc_redis.adoc index 88816138d..b8d78d2de 100644 --- a/modules/proc_deploy_quay_poc_redis.adoc +++ b/modules/proc_deploy_quay_poc_redis.adoc @@ -2,10 +2,7 @@ [id="poc-configuring-redis"] = Configuring Redis -Redis ia a key-value store that is used by {productname} for live builder logs and the {productname} tutorial. - -[id="poc-setting-up-redis"] -== Setting up Redis +Redis is a key-value store that is used by {productname} for live builder logs. Use the following procedure to deploy the `Redis` container for the {productname} proof of concept. diff --git a/modules/proc_deploy_quay_poc_restart.adoc b/modules/proc_deploy_quay_poc_restart.adoc index 65dfe6e52..57f97cb4f 100644 --- a/modules/proc_deploy_quay_poc_restart.adoc +++ b/modules/proc_deploy_quay_poc_restart.adoc @@ -1,6 +1,6 @@ = Restarting containers -Because the `--restart` option is not fully supported by podman, you can configure `podman` as a systemd service, as described +Because the `--restart` option is not fully supported by podman, you can configure `podman` as a systemd service, as described in link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#porting-containers-to-systemd-using-podman_building-running-and-managing-containers[Porting containers to systemd using Podman] @@ -88,9 +88,9 @@ Once you have the services configured and enabled, reboot the system. When the .... 
$ sudo podman ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4e87c7889246 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 19 seconds ago Up 18 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay -b8fbac1920d4 registry.redhat.io/rhel8/redis-5:1 run-redis 19 seconds ago Up 18 seconds ago 0.0.0.0:6379->6379/tcp redis -d959d5bf7a24 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 18 seconds ago Up 18 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 19 seconds ago Up 18 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110) run-redis 19 seconds ago Up 18 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 18 seconds ago Up 18 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 18 seconds ago Up 17 seconds ago 0.0.0.0:8081->8080/tcp clairv4 .... @@ -136,9 +136,9 @@ Once you have updated the Quay service configuration, reboot the server and imme .... 
$ sudo podman ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4e87c7889246 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 29 seconds ago Up 28 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay -b8fbac1920d4 registry.redhat.io/rhel8/redis-5:1 run-redis 29 seconds ago Up 28 seconds ago 0.0.0.0:6379->6379/tcp redis -d959d5bf7a24 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 28 seconds ago Up 28 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 29 seconds ago Up 28 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110) run-redis 29 seconds ago Up 28 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 28 seconds ago Up 28 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 28 seconds ago Up 27 seconds ago 0.0.0.0:8081->8080/tcp clairv4 .... @@ -147,10 +147,10 @@ Initially, the `Quay` container will not be available, but once the `RestartSec` .... 
$ sudo podman ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4e87c7889246 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 35 seconds ago Up 34 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 35 seconds ago Up 34 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay ab9f0e6ad7c3 registry.redhat.io/quay/quay-rhel8:v3.4.0 registry 3 seconds ago Up 2 seconds ago 0.0.0.0:8080->8080/tcp quay -b8fbac1920d4 registry.redhat.io/rhel8/redis-5:1 run-redis 35 seconds ago Up 34 seconds ago 0.0.0.0:6379->6379/tcp redis -d959d5bf7a24 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 34 seconds ago Up 34 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110) run-redis 35 seconds ago Up 34 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 34 seconds ago Up 34 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 34 seconds ago Up 33 seconds ago 0.0.0.0:8081->8080/tcp clairv4 .... diff --git a/modules/proc_deploy_quay_poc_rhel.adoc b/modules/proc_deploy_quay_poc_rhel.adoc index 1c0e5d608..a12687380 100644 --- a/modules/proc_deploy_quay_poc_rhel.adoc +++ b/modules/proc_deploy_quay_poc_rhel.adoc @@ -9,10 +9,9 @@ Use the following procedures to configure {rhel} for a {productname} proof of co Use the following procedure to configure the {rhel} server for a {productname} proof of concept deployment. - .Procedure -. Install the latest {rhel-short} 8 server. You can do a minimal, shell-access only install, or Server plus GUI if you want a desktop. +. Install the latest {rhel-short} 9 server. You can do a minimal, shell-access only install, or Server plus GUI if you want a desktop. . 
Register and subscribe your {rhel-short} server system as described in link:https://access.redhat.com/solutions/253273[How to register and subscribe a RHEL system to the Red Hat Customer Portal using Red Hat Subscription-Manager] @@ -27,27 +26,6 @@ Use the following procedure to configure the {rhel} server for a {productname} p # yum update -y ---- -[id="poc-installing-podman"] -== Installing Podman - -Use the following procedure to install Podman. - -.Procedure - -* Enter the following command to install Podman: -+ -[source,terminal] ----- -$ sudo yum install -y podman ----- - -* Alternatively, you can install the `container-tools` module, which pulls in the full set of container software packages: -+ -[source,terminal] ----- -$ sudo yum module install -y container-tools ----- - [id="poc-registry-authentication"] == Registry authentication @@ -82,12 +60,12 @@ If you have a firewall running on your system, you might have to add rules that + [source,terminal] ---- -$ firewall-cmd --permanent --add-port=80/tcp -$ firewall-cmd --permanent --add-port=443/tcp -$ firewall-cmd --permanent --add-port=5432/tcp -$ firewall-cmd --permanent --add-port=5433/tcp -$ firewall-cmd --permanent --add-port=6379/tcp -$ firewall-cmd --reload +# firewall-cmd --permanent --add-port=80/tcp \ +&& firewall-cmd --permanent --add-port=443/tcp \ +&& firewall-cmd --permanent --add-port=5432/tcp \ +&& firewall-cmd --permanent --add-port=5433/tcp \ +&& firewall-cmd --permanent --add-port=6379/tcp \ +&& firewall-cmd --reload ---- @@ -96,6 +74,7 @@ $ firewall-cmd --reload There are several ways to configure the component containers in {productname} so that they can communicate with each other, for example: +//// * **Using the IP addresses for the containers**. 
You can determine the IP address for containers with `podman inspect` and then use the values in the configuration tool when specifying the connection strings, for example: + [source,terminal] @@ -104,6 +83,7 @@ $ sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay ---- + This approach is susceptible to host restarts, as the IP addresses for the containers will change after a reboot. +//// * **Using a naming service**. If you want your deployment to survive container restarts, which typically result in changed IP addresses, you can implement a naming service. For example, the link:https://github.com/containers/dnsname[dnsname] plugin is used to allow containers to resolve each other by name. @@ -111,16 +91,7 @@ This approach is susceptible to host restarts, as the IP addresses for the conta * **Configuring port mapping**. You can use port mappings to expose ports on the host and then use these ports in combination with the host IP address or host name. -This document uses port mapping and assumes a static IP address for your host system. Throughout the deployment, `quay-sever.example.com` is used with the `192.168.1.112` IP address. This information is established in the `/etc/hosts` file, for example: -[source,terminal] ----- -$ cat /etc/hosts ----- -Example output: -[source,terminal] ----- -192.168.1.112 quay-server.example.com ----- +This document uses port mapping and assumes a static IP address for your host system. .Sample proof of concept port mapping [%header, cols="2,1,1"] diff --git a/modules/proc_deploy_quay_poc_run.adoc b/modules/proc_deploy_quay_poc_run.adoc index 312e57795..20b166640 100644 --- a/modules/proc_deploy_quay_poc_run.adoc +++ b/modules/proc_deploy_quay_poc_run.adoc @@ -2,47 +2,94 @@ [id="poc-deploying-quay"] = Deploying {productname} -== Prerequisites +After you have configured your {productname} deployment, you can deploy it using the following procedures. + +.Prerequisites * The {productname} database is running. 
* The Redis server is running. -* You have generated a valid configuration file. -* You have stopped the `Quay` container that was running the configuration editor. -[id="preparing-configuration-folder"] -== Preparing the configuration folder +[id="preparing-configuration-file"] +== Creating the YAML configuration file -Use the following procedure to prepare your {productname} configuration folder. +Use the following procedure to deploy {productname} locally. -.Procedure +.Procedure -. Create a directory to copy the {productname} configuration bundle to: +. Enter the following command to create a minimal `config.yaml` file that is used to deploy the {productname} container: + [source,terminal] ---- -$ mkdir $QUAY/config +$ touch config.yaml ---- -. Copy the generated {productname} configuration bundle to the directory: +. Copy and paste the following YAML configuration into the `config.yaml` file: + -[source,terminal] +[source,yaml] ---- -$ cp ~/Downloads/quay-config.tar.gz ~/config +BUILDLOGS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 +CREATE_NAMESPACE_ON_PUSH: true +DATABASE_SECRET_KEY: a8c2744b-7004-4af2-bcee-e417e7bdd235 +DB_URI: postgresql://quayuser:quaypass@quay-server.example.com:5432/quay +DISTRIBUTED_STORAGE_CONFIG: + default: + - LocalStorage + - storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +FEATURE_MAILING: false +SECRET_KEY: e9bd34f4-900c-436a-979e-7530e5d74ac8 +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +USER_EVENTS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 ---- -. Change into the the directory: +. Create a directory to copy the {productname} configuration bundle to: + [source,terminal] ---- -$ cd $QUAY/config +$ mkdir $QUAY/config ---- -. Unpack the {productname} configuration bundle: +. 
Copy the {productname} configuration file to the directory: + [source,terminal] ---- -$ tar xvf quay-config.tar.gz +$ cp -v config.yaml $QUAY/config +---- + +[id="configuring-superuser"] +=== Configuring a {productname} superuser + +You can optionally add a superuser by editing the `config.yaml` file to add the necessary configuration fields. The list of superuser accounts is stored as an array in the field `SUPER_USERS`. Superusers have the following capabilities: + +* User management +* Organization management +* Service key management +* Change log transparency +* Usage log management +* Globally-visible user message creation + +.Procedure + +. Add the `SUPER_USERS` array to the `config.yaml` file: ++ +[source,yaml] +---- +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +SUPER_USERS: + - quayadmin <1> +... ---- +<1> If following this guide, use `quayadmin`. [id="preparing-local-storage"] == Prepare local storage for image data @@ -68,9 +115,11 @@ $ setfacl -m u:1001:-wx $QUAY/storage [id="deploy-quay-registry"] == Deploy the {productname} registry -. Use the following procedure to deploy the `Quay` registry container. +Use the following procedure to deploy the `Quay` registry container. + +.Procedure -. 
Enter the following command to start the `Quay` registry container, specifying the appropriate volumes for configuration data and local storage for image data: +* Enter the following command to start the `Quay` registry container, specifying the appropriate volumes for configuration data and local storage for image data: + [subs="verbatim,attributes"] ---- diff --git a/modules/proc_deploy_quay_poc_use.adoc b/modules/proc_deploy_quay_poc_use.adoc index d21ccfa51..0d53208fe 100644 --- a/modules/proc_deploy_quay_poc_use.adoc +++ b/modules/proc_deploy_quay_poc_use.adoc @@ -1,25 +1,49 @@ +:_content-type: CONCEPT +[id="use-quay-poc"] = Using {productname} -The following steps allow you to use the interface and create new organizations and repositories , and to search and browse existing repositories. Following step 3, you can use the command line interface to interact with the registry, and to push and pull images. -. Use your browser to access the user interface for the {productname} registry at `\http://quay-server.example.com`, assuming you have configured `quay-server.example.com` as your hostname in your `/etc/hosts` file. +The following steps show you how to use the interface to create new organizations and repositories, and to search and browse existing repositories. Following step 3, you can use the command line interface to interact with the registry and to push and pull images. + +.Procedure + +. Use your browser to access the user interface for the {productname} registry at `\http://quay-server.example.com`, assuming you have configured `quay-server.example.com` as your hostname in your `/etc/hosts` file and in your `config.yaml` file. . Click `Create Account` and add a user, for example, `quayadmin` with a password `password`. . From the command line, log in to the registry: + -.... 
+[source,terminal] +---- $ sudo podman login --tls-verify=false quay-server.example.com +---- ++ +.Example output ++ +[source,terminal] +---- Username: quayadmin Password: password Login Succeeded! -.... +---- -== Push and pull images +[id="pushing-pulling-images-poc"] +== Pushing and pulling images on {productname} + +Use the following procedure to push and pull images to your {productname} registry. + +.Procedure . To test pushing and pulling images from the {productname} registry, first pull a sample image from an external registry: + -.... +[source,terminal] +---- $ sudo podman pull busybox +---- ++ +.Example output ++ +[source,terminal] +---- Trying to pull docker.io/library/busybox... Getting image source signatures Copying blob 4c892f00285e done @@ -27,45 +51,73 @@ Copying config 22667f5368 done Writing manifest to image destination Storing signatures 22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 -.... +---- -. Use the `podman images` command to see the local copy: +. Enter the following command to see the local copy of the image: + -.... +[source,terminal] +---- $ sudo podman images +---- ++ +.Example output ++ +[source,terminal] +---- REPOSITORY TAG IMAGE ID CREATED SIZE docker.io/library/busybox latest 22667f53682a 14 hours ago 1.45 MB -... -.... +---- -. Tag this image, in preparation for pushing it to the {productname} registry: +. Enter the following command to tag this image, which prepares the image for pushing it to the registry: + -.... +[source,terminal] +---- $ sudo podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test -.... +---- -. Next, push the image to the {productname} registry. Following this step, you can use your browser to see the tagged image in your repository. -+ -.... +. Push the image to your registry. Following this step, you can use your browser to see the tagged image in your repository. 
++ +[source,terminal] +---- $ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- ++ +.Example output ++ +[source,terminal] +---- Getting image source signatures Copying blob 6b245f040973 done Copying config 22667f5368 done Writing manifest to image destination Storing signatures -.... +---- . To test access to the image from the command line, first delete the local copy of the image: -+ -.... ++ +[source,terminal] +---- $ sudo podman rmi quay-server.example.com/quayadmin/busybox:test +---- ++ +Example output ++ +[source,terminal] +---- Untagged: quay-server.example.com/quayadmin/busybox:test -.... +---- . Pull the image again, this time from your {productname} registry: -+ -.... ++ +[source,terminal] +---- $ sudo podman pull --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- ++ +.Example output ++ +[source,terminal] +---- Trying to pull quay-server.example.com/quayadmin/busybox:test... Getting image source signatures Copying blob 6ef22a7134ba [--------------------------------------] 0.0b / 0.0b @@ -73,4 +125,21 @@ Copying config 22667f5368 done Writing manifest to image destination Storing signatures 22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 -.... +---- + +[id="accessing-superuser-admin-panel"] +== Accessing the superuser admin panel + +If you added a superuser to your `config.yaml` file, you can access the *Superuser Admin Panel* on the {productname} UI by using the following procedure. + +.Prerequisites + +* You have configured a superuser. + +.Procedure + +. Access the *Superuser Admin Panel* on the {productname} UI by clicking on the current user's name or avatar in the navigation pane of the UI. Then, click *Superuser Admin Panel*. ++ +image:super-user-admin-panel.png[Super User Admin Panel] ++ +On this page, you can manage users, your organization, service keys, view change logs, view usage logs, and create global messages for your organization. 
\ No newline at end of file diff --git a/modules/proc_deploy_quay_single.adoc b/modules/proc_deploy_quay_single.adoc index 00c92401f..4b6eaea35 100644 --- a/modules/proc_deploy_quay_single.adoc +++ b/modules/proc_deploy_quay_single.adoc @@ -1,7 +1,13 @@ -== Starting up the supporting services -Follow these steps to install {productname} on a single system (VM or bare metal). +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] + +[id="starting-up-supporting-service"] += Starting up the supporting services + +Follow these steps to install {productname} on a single system, either virtual machine (VM) or bare metal. . **Install Red Hat Enterprise Linux server**: Install the latest RHEL server. You can do a Minimal install (shell access only) or Server plus GUI (if you want a desktop). + . **Register the System**: Register and subscribe your RHEL server system to Red Hat. See link:https://access.redhat.com/solutions/253273[How to register and subscribe a system...] for details. The following commands register your system and list available subscriptions. Choose an available RHEL server subscription, attach to its poolid, enable rhel-7-server-rpms and rhel-7-server-extras-rpms repositories, and upgrade to the latest software: + [NOTE] @@ -11,21 +17,44 @@ so you would need to use the `podman` command instead. Because the `--restart` option is not supported by podman, instead of using `--restart`, you could set up to use `podman` as a systemd service, as described in link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#starting_containers_with_systemd[Starting containers with systemd]. - ==== - + -.... 
+[source,terminal] +---- # subscription-manager register --username= --password= +---- ++ +[source,terminal] +---- # subscription-manager refresh +---- ++ +[source,terminal] +---- # subscription-manager list --available +---- ++ +[source,terminal] +---- # subscription-manager attach --pool= +---- ++ +[source,terminal] +---- # subscription-manager repos --disable="*" +---- ++ +[source,terminal] +---- # subscription-manager repos \ --enable="rhel-7-server-rpms" \ --enable="rhel-7-server-extras-rpms" +---- ++ +[source,terminal] +---- # yum update -y -.... +---- ifdef::downstream[] . **Add registry.redhat.io authentication**: Set up authentication to registry.redhat.io, so you can pull the `Quay` container, as described in link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]. Note that this differs from earlier {productname} releases where the images were hosted on quay.io. diff --git a/modules/proc_generating-splunk-token.adoc b/modules/proc_generating-splunk-token.adoc new file mode 100644 index 000000000..24200e396 --- /dev/null +++ b/modules/proc_generating-splunk-token.adoc @@ -0,0 +1,66 @@ +:_content-type: PROCEDURE +[id="proc_generating-splunk-token"] += Generating a Splunk token + +Use one of the following procedures to create a bearer token for Splunk. + +[id="proc_generating-splunk-token-ui"] +== Generating a Splunk token using the Splunk UI + +Use the following procedure to create a bearer token for Splunk using the Splunk UI. + +.Prerequisites + +* You have installed Splunk and created a username. + +.Procedure + +. On the Splunk UI, navigate to *Settings* -> *Tokens*. + +. Click *Enable Token Authentication*. + +. Ensure that *Token Authentication* is enabled by clicking *Token Settings* and selecting *Token Authentication* if necessary. + +. Optional: Set the expiration time for your token. This defaults at 30 days. + +. Click *Save*. + +. Click *New Token*. + +. Enter information for *User* and *Audience*. + +. 
Optional: Set the *Expiration* and *Not Before* information. + +. Click *Create*. Your token appears in the *Token* box. Copy the token immediately. ++ +[IMPORTANT] +==== +If you close out of the box before copying the token, you must create a new token. The token in its entirety is not available after closing the *New Token* window. +==== + +[id="proc_generating-splunk-token-cli"] +== Generating a Splunk token using the CLI + +Use the following procedure to create a bearer token for Splunk using the CLI. + +.Prerequisites + +* You have installed Splunk and created a username. + +.Procedure + +. In your CLI, enter the following `CURL` command to enable token authentication, passing in your Splunk username and password: ++ +[source,terminal] +---- +$ curl -k -u : -X POST ://:/services/admin/token-auth/tokens_auth -d disabled=false +---- + +. Create a token by entering the following `CURL` command, passing in your Splunk username and password. ++ +[source,terminal] +---- +$ curl -k -u : -X POST ://:/services/authorization/tokens?output_mode=json --data name= --data audience=Users --data-urlencode expires_on=+30d +---- + +. Save the generated bearer token. \ No newline at end of file diff --git a/modules/proc_github-app.adoc b/modules/proc_github-app.adoc index bdddc4bce..52548077d 100644 --- a/modules/proc_github-app.adoc +++ b/modules/proc_github-app.adoc @@ -1,26 +1,38 @@ -[[github-app]] +[id="github-app"] = Creating an OAuth application in GitHub -You can authorize your registry to access a GitHub account and its repositories by registering it as a GitHub OAuth application. +The following sections describe how to authorize {productname} to integrate with GitHub by creating an OAuth application. This allows {productname} to access GitHub repositories on behalf of a user. 
-[[github-app-create]] +OAuth integration with GitHub is primarily used to allow features like automated builds, where {productname} can be enabled to monitor specific GitHub repositories for changes like commits or pull requests, and trigger contain image builds when those changes are made. + +[id="github-app-create"] == Create new GitHub application -. Log into GitHub (Enterprise) -. Visit the Applications page under your organization's settings. -. Click link:https://github.com/settings/applications/new[Register New Application]. The `Register a new OAuth application` configuration screen is displayed: - image:register-app.png[Register a new OAuth application] -. Set Homepage URL: Enter the Quay Enterprise URL as the `Homepage URL` +Use the following procedure to create an OAuth application in Github. + +.Procedure + +. Log into link:https://github.com/enterprise[GitHub Enterprise]. + +. In the navigation pane, select your username -> *Your organizations*. + +. In the navigation pane, select *Applications* -> *Developer Settings*. + +. In the navigation pane, click *OAuth Apps* -> *New OAuth App*. You are navigated to the following page: ++ +image:register-app.png[Register a new OAuth application] + +. Enter a name for the application in the *Application name* textbox. + +. In the *Homepage URL* textbox, enter your {productname} URL. + [NOTE] ==== -If using public GitHub, the Homepage URL entered must be accessible by your users. It can still be an internal URL. +If you are using public GitHub, the Homepage URL entered must be accessible by your users. It can still be an internal URL. ==== -. Set Authorization callback URL: Enter -https://{$RED_HAT_QUAY_URL}/oauth2/github/callback -as the Authorization callback URL. +. In the *Authorization callback URL*, enter *https:///oauth2/github/callback*. -. Save your settings by clicking the Register application button. The new new application's summary is shown: +. Click *Register application* to save your settings. -. 
Record the Client ID and Client Secret shown for the new application. +. When the new application's summary is shown, record the Client ID and the Client Secret shown for the new application. \ No newline at end of file diff --git a/modules/proc_installing-creating-username-splunk.adoc b/modules/proc_installing-creating-username-splunk.adoc new file mode 100644 index 000000000..4fed26b3e --- /dev/null +++ b/modules/proc_installing-creating-username-splunk.adoc @@ -0,0 +1,17 @@ +:_content-type: PROCEDURE +[id="proc_installing-creating-username-splunk"] += Installing and creating a username for Splunk + +Use the following procedure to install and create Splunk credentials. + +.Procedure + +. Create a Splunk account by navigating to link:https://www.splunk.com/en_us/sign-up.html[Splunk] and entering the required credentials. + +. Navigate to the link:https://www.splunk.com/en_us/download/splunk-enterprise.html[Splunk Enterprise] *Free Trial* page, select your platform and installation package, and then click *Download Now*. + +. Install the Splunk software on your machine. When prompted, create a username, for example, `splunk_admin` and password. + +. After creating a username and password, a localhost URL will be provided for your Splunk deployment, for example, `http://.remote.csb:8000/`. Open the URL in your preferred browser. + +. Log in with the username and password you created during installation. You are directed to the Splunk UI. \ No newline at end of file diff --git a/modules/proc_installing-qbo-on-ocp.adoc b/modules/proc_installing-qbo-on-ocp.adoc index ffe0c94db..d8075bd5f 100644 --- a/modules/proc_installing-qbo-on-ocp.adoc +++ b/modules/proc_installing-qbo-on-ocp.adoc @@ -1,5 +1,5 @@ :_content-type: PROCEDURE -[[installing-qbo-on-ocp]] +[id="installing-qbo-on-ocp"] = Installing the {qbo} on {ocp} In this procedure, you will install the {qbo} on {ocp}. 
diff --git a/modules/proc_manage-advanced-config.adoc b/modules/proc_manage-advanced-config.adoc index b72494a67..029490cab 100644 --- a/modules/proc_manage-advanced-config.adoc +++ b/modules/proc_manage-advanced-config.adoc @@ -1,20 +1,21 @@ :_content-type: CONCEPT - [id="advanced-quay-configuration"] = Advanced {productname} configuration -You can configure your {productname} after initial deployment using one of the following interfaces: +You can configure your {productname} after initial deployment using one of the following methods: -* The {productname} Config Tool. With this tool, a web-based interface for configuring the {productname} cluster is provided when running the `Quay` container in `config` mode. This method is recommended for configuring the {productname} service. +//// +* *Using the {productname} Config Tool*. With this tool, a web-based interface for configuring the {productname} cluster is provided when running the `Quay` container in `config` mode. This method is recommended for configuring the {productname} service. +//// -* Editing the `config.yaml`. The `config.yaml` file contains most configuration information for the {productname} cluster. Editing the `config.yaml` file directly is possible, but it is only recommended for advanced tuning and performance features that are not available through the Config Tool. +* *Editing the `config.yaml` file*. The `config.yaml` file contains most configuration information for the {productname} cluster. Editing the `config.yaml` file directly is the primary method for advanced tuning and enabling specific features. -* {productname} API. Some {productname} features can be configured through the API. +* *Using the {productname} API*. Some {productname} features can be configured through the API. This content in this section describes how to use each of the aforementioned interfaces and how to configure your deployment with advanced features. 
-[[using-the-config-tool]] - +//// +[id="using-the-config-tool"] == Using {productname} Config Tool to modify {productname} The {productname} Config Tool is made available by running a `Quay` container in `config` mode alongside the regular {productname} service. @@ -52,6 +53,7 @@ image:configtoolsetup.png[Modify {productname} cluster settings from the Config . Select *Go to deployment rollout* -> *Populate the configuration to deployments*. Wait for the {productname} pods to restart for the changes to take effect. + [id="running-config-tool-from-cli"] === Running the Config Tool from the command line @@ -86,7 +88,7 @@ To modify an existing config bundle, you can mount your configuration directory [id="deploying-config-tool-using-tls"] === Deploying the config tool using TLS certificates -You can deploy the config tool with secured TLS certificates by passing environment variables to the runtime variable. This ensures that sensitive data like credentials for the database and storage backend are protected. +You can deploy the config tool with secured SSL/TLS certificates by passing environment variables to the runtime variable. This ensures that sensitive data like credentials for the database and storage backend are protected. The public and private keys must contain valid Subject Alternative Names (SANs) for the route that you deploy the config tool on. @@ -105,41 +107,62 @@ $ podman run --rm -it --name quay_config -p 7070:8080 \ -e DEBUGLOG=true \ -ti config-app:dev ---- +//// [id="overview-advanced-config"] == Using the API to modify {productname} See the -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_api_guide/index[{productname} API Guide] for information on how to access {productname} API. +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API Guide] for information on how to access {productname} API. 
[id="editing-config-file-to-modify-quay"] -== Editing the `config.yaml` file to modify {productname} +== Editing the config.yaml file to modify {productname} -Some advanced configuration features that are not available through the Config Tool can be implemented by editing the `config.yaml` file directly. Available settings are described in the -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/quay-schema[Schema for {productname} configuration] +Advanced features can be implemented by editing the `config.yaml` file directly. All configuration fields for {productname} features and settings are available in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index[{productname} configuration guide]. -The following examples are settings you can change directly in the `config.yaml` file. +The following example is one setting that you can change directly in the `config.yaml` file. Use this example as a reference when editing your `config.yaml` file for other features and settings. [id="add-name-and-company-to-quay-sign-in"] -=== Add name and company to {productname} sign-in -By setting the following field, users are prompted for their name and company when they first sign in. This is an optional field, but can provide your with extra data about your {productname} users. +=== Adding name and company to {productname} sign-in + +By setting the `FEATURE_USER_METADATA` field to `true`, users are prompted for their name and company when they first sign in. This is an optional field, but can provide you with extra data about your {productname} users. + +Use the following procedure to add a name and a company to the {productname} sign-in page. + +.Procedure + +. Add, or set, the `FEATURE_USER_METADATA` configuration field to `true` in your `config.yaml` file. For example: [source,yaml] ---- ---- +# ... FEATURE_USER_METADATA: true ---- +# ... ---- +. Redeploy {productname}. 
+ +. Now, when prompted to log in, users are requested to enter the following information: ++ +image:metadata-request.png[Metadata request] + +//// [id="disable-tls-protocols"] === Disable TLS Protocols -You can change the `SSL_PROTOCOLS` setting to remove SSL protocols that you do not want to support in your {productname} instance. For example, to remove TLS v1 support from the default `SSL_PROTOCOLS:['TLSv1','TLSv1.1','TLSv1.2']`, change it to the following: + +You can change the `SSL_PROTOCOLS` setting to remove SSL protocols that you do not want to support in your {productname} instance. By default, {productname} is configured to support `TLSv1`, `TLSv1.1`, and `TLSv1.2`. +Use the following procedure to remove TLS v1 support from {productname}. + +.Procedure + + +to remove TLS v1 support from the default `SSL_PROTOCOLS:['TLSv1','TLSv1.1','TLSv1.2']`, change it to the following: [source,yaml] ---- ---- +# ... SSL_PROTOCOLS : ['TLSv1.1','TLSv1.2'] ---- +# ... ---- [id="rate-limit-api-calls"] @@ -308,10 +331,10 @@ The following table lists and describes each variable and the values they can ex [cols="2a,2a,2a",options="header"] |=== | Variable | Description | Values -| WORKER_COUNT_REGISTRY | Specifies the number of processes to handle registry requests within the `Quay` container. | Integer between 8 and 64 -| WORKER_COUNT_WEB | Specifies the number of processes to handle UI/Web requests within the container. | Integer between 2 and 32 -| WORKER_COUNT_SECSCAN | Specifies the number of processes to handle Security Scanning (for example, Clair) integration within the container. | Integer between 2 and 4 -| DB_CONNECTION_POOLING | Toggle database connection pooling. | "true" or "false" +| *WORKER_COUNT_REGISTRY* | Specifies the number of processes to handle registry requests within the `Quay` container. | Integer between `8` and `64` +| *WORKER_COUNT_WEB* | Specifies the number of processes to handle UI/Web requests within the container. 
| Integer between `2` and `32` +| *WORKER_COUNT_SECSCAN* | Specifies the number of processes to handle Security Scanning (for example, Clair) integration within the container. | Integer. Because the Operator specifies 2 vCPUs for resource requests and limits, setting this value between `2` and `4` is safe. However, users can run more, for example, `16`, if warranted. +| *DB_CONNECTION_POOLING* | Toggle database connection pooling. | `true` or `false` |=== [id="turning-off-connection-pooling"] @@ -327,3 +350,4 @@ If turning off connection pooling is not enough to prevent hitting the 2k database connection limit, you need to take additional steps to deal with the problem. If this happens, you might need to increase the maximum database connections to better suit your workload. +//// \ No newline at end of file diff --git a/modules/proc_manage-ipv6-dual-stack.adoc b/modules/proc_manage-ipv6-dual-stack.adoc index 889ddd29c..8106db0b2 100644 --- a/modules/proc_manage-ipv6-dual-stack.adoc +++ b/modules/proc_manage-ipv6-dual-stack.adoc @@ -95,7 +95,7 @@ $ curl --ipv6 After enabling dual-stack in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured for dual-stack. [id="proc_manage-ipv6-limitations-38"] -== IPv6 and dua-stack limitations +== IPv6 and dual-stack limitations * Currently, attempting to configure your {productname} deployment with the common Azure Blob Storage configuration will not work on IPv6 single stack environments. Because the endpoint of Azure Blob Storage does not support IPv6, there is no workaround in place for this issue. + @@ -103,6 +103,4 @@ For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4433[PR * Currently, attempting to configure your {productname} deployment with Amazon S3 CloudFront will not work on IPv6 single stack environments. Because the endpoint of Amazon S3 CloudFront does not support IPv6, there is no workaround in place for this issue. 
+ -For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470]. - -* Currently, OpenShift Data Foundations (ODF) is unsupported when {productname} is deployed on IPv6 single stack environments. As a result, ODF cannot be used in IPv6 environments. This limitation is scheduled to be fixed in a future version of OpenShift Data Foundations. \ No newline at end of file +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470]. \ No newline at end of file diff --git a/modules/proc_manage-ldap-setup.adoc b/modules/proc_manage-ldap-setup.adoc index cfc710004..01e33b519 100644 --- a/modules/proc_manage-ldap-setup.adoc +++ b/modules/proc_manage-ldap-setup.adoc @@ -1,100 +1,152 @@ -[[ldap-authentication-setup-for-quay-enterprise]] +:_content-type: CONCEPT +[id="ldap-authentication-setup-for-quay-enterprise"] = LDAP Authentication Setup for {productname} -The Lightweight Directory Access Protocol (LDAP) is an open, -vendor-neutral, industry standard application protocol for accessing and -maintaining distributed directory information services over an Internet -Protocol (IP) network. {productname} supports using LDAP as an -identity provider. +Lightweight Directory Access Protocol (LDAP) is an open, vendor-neutral, industry standard application protocol for accessing and maintaining distributed directory information services over an Internet Protocol (IP) network. {productname} supports using LDAP as an identity provider. -== Considerations prior to enabling LDAP +[id="ldap-considerations"] +== Considerations when enabling LDAP -[[considerations-for-existing-quay-deployments]] -=== Existing Quay deployments -Conflicts between user names can arise when you enable LDAP for an existing Quay deployment that already has users configured. Consider the scenario where a particular user, `alice`, was manually created in Quay prior to enabling LDAP. 
If the user name `alice` also exists in the LDAP directory, Quay will create a new user `alice-1` when `alice` logs in for the first time using LDAP, and will map the LDAP credentials to this account. This might not be want you want, for consistency reasons, and it is recommended that you remove any potentially conflicting local account names from Quay prior to enabling LDAP. +Prior to enabling LDAP for your {productname} deployment, you should consider the following. -[[considerations-for-manual-user-creation]] -=== Manual User Creation and LDAP authentication +[discrete] +[id="existing-quay-deployments"] +=== Existing {productname} deployments -When Quay is configured for LDAP, LDAP-authenticated users are automatically created in Quay's database on first log in, if the configuration option `FEATURE_USER_CREATION` is set to `true`. If this option is set to `false`, the automatic user creation for LDAP users will fail and the user is not allowed to log in. In this scenario, the superuser needs to create the desired user account first. -Conversely, if `FEATURE_USER_CREATION` is set to `true`, this also means that a user can still create an account from the Quay login screen, even if there is an equivalent user in LDAP. +Conflicts between usernames can arise when you enable LDAP for an existing {productname} deployment that already has users configured. For example, one user, `alice`, was manually created in {productname} prior to enabling LDAP. If the username `alice` also exists in the LDAP directory, {productname} automatically creates a new user, `alice-1`, when `alice` logs in for the first time using LDAP. {productname} then automatically maps the LDAP credentials to the `alice` account. For consistency reasons, this might be erroneous for your {productname} deployment. It is recommended that you remove any potentially conflicting local account names from {productname} prior to enabling LDAP. 
-[[setup-ldap-configuration]] -== Set Up LDAP Configuration +[discrete] +[id="considerations-for-manual-user-creation"] +=== Manual User Creation and LDAP authentication -In the config tool, locate the Authentication section and select “LDAP” from the drop-down menu. Update LDAP configuration fields as required. +When {productname} is configured for LDAP, LDAP-authenticated users are automatically created in {productname}'s database on first log in, if the configuration option `FEATURE_USER_CREATION` is set to `true`. If this option is set to `false`, the automatic user creation for LDAP users fails, and the user is not allowed to log in. In this scenario, the superuser needs to create the desired user account first. Conversely, if `FEATURE_USER_CREATION` is set to `true`, this also means that a user can still create an account from the {productname} login screen, even if there is an equivalent user in LDAP. -image:authentication-ldap.png[Fill in LDAP information] +[id="setup-ldap-configuration"] +== Configuring LDAP for {productname} -* Here is an example of the resulting entry in the _config.yaml_ file: -.... -AUTHENTICATION_TYPE: LDAP -.... +You can configure LDAP for {productname} by updating your `config.yaml` file directly and restarting your deployment. Use the following procedure as a reference when configuring LDAP for {productname}. -=== Full LDAP URI +//// +.Procedure -image:authentication-ldap-uri.png[LDAP server URI] -image:authentication-ldap-ssl.png[LDAP server SSL] +. You can use the {productname} config tool to configure LDAP. -* The full LDAP URI, including the _ldap://_ or _ldaps://_ prefix. -* A URI beginning with _ldaps://_ will make use of the provided SSL certificate(s) for TLS setup. -* Here is an example of the resulting entry in the _config.yaml_ file: -.... -LDAP_URI: ldaps://ldap.example.org -.... - -=== Team Synchronization +.. Using the {productname} config tool, locate the *Authentication* section. 
Select *LDAP* from the dropdown menu, and update the LDAP configuration fields as required. ++ +image:authentication-ldap.png[LDAP configuration fields] +.. Optional. On the *Team synchronization* box, click *Enable Team Synchronization Support*. With team synchronization enabled, {productname} administrators who are also superusers can set teams to have their membership synchronized with a backing group in LDAP. ++ image:authentication-ldap-team-sync-1.png[Team synchronization] -* If enabled, organization administrators who are also superusers can set teams to have their membership synchronized with a backing group in LDAP. +.. For *Resynchronization duration*, enter *60m*. This option sets the resynchronization duration at which a team must be re-synchronized. This field must be set similar to the following examples: `30m`, `1h`, `1d`. +.. Optional. For *Self-service team syncing setup*, you can click *Allow non-superusers to enable and manage team syncing* to allow non-superusers the ability to enable and manage team syncing under the organizations that they are administrators for. ++ image:authentication-ldap-team-sync-2.png[Team synchronization] -* The resynchronization duration is the period at which a team must be re-synchronized. Must be expressed in a duration string form: 30m, 1h, 1d. -* Optionally allow non-superusers to enable and manage team syncing under organizations in which they are administrators. -* Here is an example of the resulting entries in the _config.yaml_ file: -.... -FEATURE_TEAM_SYNCING: true -TEAM_RESYNC_STALE_TIME: 60m -FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: true -.... - -=== Base and Relative Distinguished Names +.. Locate the *LDAP URI* box and provide a full LDAP URI, including the _ldap://_ or _ldaps://_ prefix, for example, `ldap://117.17.8.101`. ++ +image:authentication-ldap-uri.png[LDAP server URI] +.. Under *Base DN*, provide a name which forms the base path for looking up all LDAP records, for example, `o=`,`dc=`,`dc=com`. 
++ image:authentication-ldap-basedn.png[Distinguished Names] -* A Distinguished Name path which forms the base path for looking up all LDAP records. Example: _dc=my,dc=domain,dc=com_ -* Optional list of Distinguished Name path(s) which form the secondary base path(s) for looking up all user LDAP records, relative to the Base DN defined above. These path(s) will be tried if the user is not found via the primary relative DN. -* User Relative DN is relative to BaseDN. Example: _ou=NYC_ not _ou=NYC,dc=example,dc=org_ -* Multiple “Secondary User Relative DNs” may be entered if there are multiple Organizational Units where User objects are located at. Simply type in the Organizational Units and click on Add button to add multiple RDNs. Example: _ou=Users,ou=NYC and ou=Users,ou=SFO_ -* The "User Relative DN" searches with subtree scope. For example, if your Organization has Organizational Units NYC and SFO under the Users OU (_ou=SFO,ou=Users_ and _ou=NYC,ou=Users_), {productname} can authenticate users from both the _NYC_ and _SFO_ Organizational Units if the User Relative DN is set to Users (_ou=Users_). -* Here is an example of the resulting entries in the _config.yaml_ file: -.... -LDAP_BASE_DN: -- dc=example -- dc=com -LDAP_USER_RDN: -- ou=users -LDAP_SECONDARY_USER_RDNS: -- ou=bots -- ou=external -.... +.. Under *User Relative DN*, provide a list of Distinguished Name path(s), which form the secondary base path(s) for looking up all user LDAP records relative to the *Base DN* defined above. For example, `uid=`,`ou=Users`,`o=`,`dc=`,`dc=com`. This path, or these paths, is tried if the user is not found through the primary relative DN. ++ +image:user-relative-dn.png[User Relative DN] ++ +[NOTE] +==== +*User Relative DN* is relative to *Base DN*, for example, `ou=Users` and not `ou=Users,dc=,dc=com`. +==== -=== Additional User Filters +.. Optional. Provide *Secondary User Relative DNs* if there are multiple Organizational Units where user objects are located. 
You can type in the Organizational Units and click *Add* to add multiple RDNs. For example, `ou=Users,ou=NYC and ou=Users,ou=SFO`. ++ +The *User Relative DN* searches with subtree scope. For example, if your organization has Organizational Units `NYC` and `SFO` under the Users OU (that is, `ou=SFO,ou=Users` and `ou=NYC,ou=Users`), {productname} can authenticate users from both the `NYC` and `SFO` Organizational Units if the *User Relative DN* is set to `Users` (`ou=Users`). + +.. Optional. Fill in the *Additional User Filter Expression* field for all user lookup queries if desired. Distinguished Names used in the filter must be full paths. The *Base DN* is not automatically added to this field, and you must wrap the text in parentheses, for example, `(memberOf=cn=developers,ou=groups,dc=,dc=com)`. ++ +image:authentication-ldap-user-filter.png[Additional User Filter] + +.. Fill in the *Administrator DN* field for the {productname} administrator account. This account must be able to log in and view the records for all user accounts. For example: `uid=,ou=Users,o=,dc=,dc=com`. ++ +image:authentication-ldap-admin-dn.png[Administrator DN] + +.. Fill in the *Administrator DN Password* field. This is the password for the administrator distinguished name. ++ +[IMPORTANT] +==== +The password for this field is stored in plaintext inside of the `config.yaml` file. Setting up a dedicated account or using a password hash is highly recommended. +==== + +.. Optional. Fill in the *UID Attribute* field. This is the name of the property field in the LDAP user records that stores your user's username. Most commonly, *uid* is entered for this field. This field can be used to log into your {productname} deployment. ++ +image:uid-attribute-ldap.png[UID Attribute] + +.. Optional. Fill in the *Mail Attribute* field. This is the name of the property field in your LDAP user records that stores your user's e-mail addresses. Most commonly, *mail* is entered for this field. 
This field can be used to log into your {productname} deployment. ++ +image:mail-attribute-ldap.png[Mail Attribute] ++ +[NOTE] +==== +* The username to log in must exist in the *User Relative DN*. +* If you are using Microsoft Active Directory to set up your LDAP deployment, you must use `sAMAccountName` for your UID attribute. +==== + +.. Optional. You can add a custom SSL/TLS certificate by clicking *Choose File* under the *Custom TLS Certificate* option. Additionally, you can enable fallbacks to insecure, non-TLS connections by checking the *Allow fallback to non-TLS connections* box. ++ +image:authentication-ldap-ssl.png[LDAP server SSL] ++ +If you upload an SSL/TLS certificate, you must provide an _ldaps://_ prefix, for example, `LDAP_URI: ldaps://ldap_provider.example.org`. -image:authentication-ldap-user-filter.png[User filters] +//// -* If specified, the additional filter used for all user lookup queries. Note that all Distinguished Names used in the filter must be *full* paths; the Base DN is not added automatically here. *Must* be wrapped in parens. Example: (&(someFirstField=someValue)(someOtherField=someOtherValue)) -* Here is an example of the resulting entry in the _config.yaml_ file: -.... -LDAP_USER_FILTER: (memberof=cn=developers,ou=groups,dc=example,dc=com) -.... +. Update your `config.yaml` file directly to include the following relevant information: ++ +[source,yaml] +---- +# ... +AUTHENTICATION_TYPE: LDAP <1> +# ... +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com <2> +LDAP_ADMIN_PASSWD: ABC123 <3> +LDAP_ALLOW_INSECURE_FALLBACK: false <4> +LDAP_BASE_DN: <5> + - dc=example + - dc=com +LDAP_EMAIL_ATTR: mail <6> +LDAP_UID_ATTR: uid <7> +LDAP_URI: ldap://.com <8> +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,dc=,dc=com) <9> +LDAP_USER_RDN: <10> + - ou=people +LDAP_SECONDARY_USER_RDNS: <11> + - ou= + - ou= + - ou= + - ou= +# ... +---- +<1> Required. Must be set to `LDAP`. +<2> Required. The admin DN for LDAP authentication. +<3> Required. 
The admin password for LDAP authentication. +<4> Required. Whether to allow SSL/TLS insecure fallback for LDAP authentication. +<5> Required. The base DN for LDAP authentication. +<6> Required. The email attribute for LDAP authentication. +<7> Required. The UID attribute for LDAP authentication. +<8> Required. The LDAP URI. +<9> Required. The user filter for LDAP authentication. +<10> Required. The user RDN for LDAP authentication. +<11> Optional. Secondary User Relative DNs if there are multiple Organizational Units where user objects are located. + +. After you have added all required LDAP fields, save the changes and restart your {productname} deployment. [id="ldap-restricted-users-enabling"] -==== Enabling the LDAP_RESTRICTED_USER_FILTER configuration field +== Enabling the LDAP_RESTRICTED_USER_FILTER configuration field -The `LDAP_RESTRICTED_USER_FILTER` configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. +The `LDAP_RESTRICTED_USER_FILTER` configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, this option allows {productname} administrators the ability to configure LDAP users as restricted users when {productname} uses LDAP as its authentication provider. Use the following procedure to enable LDAP restricted users on your {productname} deployment. @@ -109,9 +161,11 @@ Use the following procedure to enable LDAP restricted users on your {productname + [source,yaml] ---- ---- +# ... AUTHENTICATION_TYPE: LDAP ---- +# ... +FEATURE_RESTRICTED_USERS: true <1> +# ... 
LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com LDAP_ADMIN_PASSWD: ABC123 LDAP_ALLOW_INSECURE_FALLBACK: false @@ -123,92 +177,21 @@ LDAP_EMAIL_ATTR: mail LDAP_UID_ATTR: uid LDAP_URI: ldap://.com LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) -LDAP_RESTRICTED_USER_FILTER: (=) +LDAP_RESTRICTED_USER_FILTER: (=) <2> LDAP_USER_RDN: - ou= - o= - dc= - dc=com +# ... ---- +<1> Must be set to `true` when configuring an LDAP restricted user. +<2> Configures specified users as restricted users. . Start, or restart, your {productname} deployment. After enabling the `LDAP_RESTRICTED_USER_FILTER` feature, your LDAP {productname} users are restricted from reading and writing content, and creating organizations. - -=== Administrator DN - -image:authentication-ldap-admin-dn.png[Administrator DN] - -* The Distinguished Name and password for the administrator account. This account must be able to login and view the records for all user accounts. Example: uid=admin,ou=employees,dc=my,dc=domain,dc=com -* The password will be stored in *plaintext* inside the config.yaml, so setting up a dedicated account or using a password hash is highly recommended. -* Here is an example of the resulting entries in the _config.yaml_ file: -.... -LDAP_ADMIN_DN: cn=admin,dc=example,dc=com -LDAP_ADMIN_PASSWD: changeme -.... - -=== UID and Mail attributes - -image:authentication-ldap-uid-mail.png[UID and Mail] - -* The UID attribute is the name of the property field in LDAP user record to use as the *username*. Typically "uid". -* The Mail attribute is the name of the property field in LDAP user record that stores user e-mail address(es). Typically "mail". -* Either of these may be used during login. -* The logged in username must exist in User Relative DN. -* _sAMAccountName_ is the UID attribute for against Microsoft Active Directory setups. -* Here is an example of the resulting entries in the _config.yaml_ file: -.... -LDAP_UID_ATTR: uid -LDAP_EMAIL_ATTR: mail -.... 
- -=== Validation - -Once the configuration is completed, click on “Save Configuration -Changes” button to validate the configuration. - -image:authentication-ldap-success.png[Fill in LDAP information] - -All validation must succeed before proceeding, or additional configuration may be performed by selecting the "Continue Editing" button. - -[[common-issues]] -== Common Issues - -*_Invalid credentials_* - -Administrator DN or Administrator DN Password values are incorrect - -*_Verification of superuser %USERNAME% failed: Username not found The -user either does not exist in the remote authentication system OR LDAP -auth is misconfigured._* - -{productname} can connect to the LDAP server via Username/Password specified in -the Administrator DN fields however cannot find the current logged in -user with the UID Attribute or Mail Attribute fields in the User -Relative DN Path. Either current logged in user does not exist in User -Relative DN Path, or Administrator DN user do not have rights to -search/read this LDAP path. - -[[configure-ldap-superuser]] -== Configure an LDAP user as superuser -Once LDAP is configured, you can log in to your {productname} -instance with a valid LDAP username and password. -You are prompted to confirm your {productname} username as shown in the following figure: - -image:confirm-ldap-username.png[Confirm LDAP username for {productname}] - -To attach superuser privilege to an LDAP user, modify the _config.yaml_ file -with the username. For example: - -.... -SUPER_USERS: -- testadmin -.... - -Restart the Red Hat `Quay` container with the updated config.yaml file. -The next time you log in, the user will have superuser privileges. - [id="ldap-super-users-enabling"] == Enabling the LDAP_SUPERUSER_FILTER configuration field @@ -227,9 +210,9 @@ Use the following procedure to enable LDAP superusers on your {productname} depl + [source,yaml] ---- ---- +# ... AUTHENTICATION_TYPE: LDAP ---- +# ... 
LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com LDAP_ADMIN_PASSWD: ABC123 LDAP_ALLOW_INSECURE_FALLBACK: false @@ -241,13 +224,15 @@ LDAP_EMAIL_ATTR: mail LDAP_UID_ATTR: uid LDAP_URI: ldap://.com LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) -LDAP_SUPERUSER_FILTER: (=) +LDAP_SUPERUSER_FILTER: (=) <1> LDAP_USER_RDN: - ou= - o= - dc= - dc=com +# ... ---- +<1> Configures specified users as superusers. . Start, or restart, your {productname} deployment. @@ -260,3 +245,27 @@ After enabling the `LDAP_SUPERUSER_FILTER` feature, your LDAP {productname} user * Query the usage logs * Create globally visible user messages +[id="common-ldap-configuration-issues"] +== Common LDAP configuration issues + +The following errors might be returned with an invalid configuration. + +* **Invalid credentials**. If you receive this error, the Administrator DN or Administrator DN password values are incorrect. Ensure that you are providing accurate Administrator DN and password values. + +* **Verification of superuser %USERNAME% failed**. This error is returned for the following reasons: + +** The username has not been found. +** The user does not exist in the remote authentication system. +** LDAP authorization is configured improperly. + +* **Cannot find the current logged in user**. When configuring LDAP for {productname}, there may be situations where the LDAP connection is established successfully using the username and password provided in the *Administrator DN* fields. However, if the current logged-in user cannot be found within the specified *User Relative DN* path using the *UID Attribute* or *Mail Attribute* fields, there are typically two potential reasons for this: + +** The current logged-in user does not exist in the *User Relative DN* path. +** The *Administrator DN* does not have rights to search or read the specified LDAP path. 
++ +To fix this issue, ensure that the logged in user is included in the *User Relative DN* path, or provide the correct permissions to the *Administrator DN* account. + +[id="ldap-configuration-fields-link"] +== LDAP configuration fields + +For a full list of LDAP configuration fields, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-ldap[LDAP configuration fields] \ No newline at end of file diff --git a/modules/proc_manage-log-storage-elasticsearch.adoc b/modules/proc_manage-log-storage-elasticsearch.adoc new file mode 100644 index 000000000..6976121da --- /dev/null +++ b/modules/proc_manage-log-storage-elasticsearch.adoc @@ -0,0 +1,67 @@ +[id="proc_manage-log-storage-elasticsearch"] += Configuring action log storage for Elasticsearch + +[NOTE] +==== +To configure action log storage for Elasticsearch, you must provide your own Elasticsearch stack; it is not included with {productname} as a customizable component. +==== + +Enabling Elasticsearch logging can be done during {productname} deployment or post-deployment by updating your `config.yaml` file. When configured, usage log access continues to be provided through the web UI for repositories and organizations. + +Use the following procedure to configure action log storage for Elasticsearch: + +.Procedure + +. Obtain an Elasticsearch account. + +. Update your {productname} `config.yaml` file to include the following information: ++ +[source,yaml] +---- +# ... +LOGS_MODEL: elasticsearch <1> +LOGS_MODEL_CONFIG: + producer: elasticsearch <2> + elasticsearch_config: + host: http://: <3> + port: 9200 <4> + access_key: <5> + secret_key: <6> + use_ssl: True <7> + index_prefix: <8> + aws_region: <9> +# ... +---- +<1> The method for handling log data. +<2> Choose either Elasticsearch or Kinesis to direct logs to +an intermediate Kinesis stream on AWS. 
You need to set up your own pipeline to +send logs from Kinesis to Elasticsearch, for example, Logstash. +<3> The hostname or IP address of the system providing +the Elasticsearch service. +<4> The port number providing the Elasticsearch service on the host +you just entered. Note that the port must be accessible from all systems +running the {productname} registry. The default is TCP port `9200`. +<5> The access key needed to gain access to the Elasticsearch +service, if required. +<6> The secret key needed to gain access to the Elasticsearch +service, if required. +<7> Whether to use SSL/TLS for Elasticsearch. Defaults to `True`. +<8> Choose a prefix to attach to log entries. +<9> If you are running on AWS, set the AWS region (otherwise, leave it blank). + +. Optional. If you are using Kinesis as your logs producer, you must include the following fields in your `config.yaml` file: ++ +[source,yaml] +---- + kinesis_stream_config: + stream_name: <1> + access_key: <2> + secret_key: <3> + aws_region: <4> +---- +<1> The name of the Kinesis stream. +<2> The name of the AWS access key needed to gain access to the Kinesis stream, if required. +<3> The name of the AWS secret key needed to gain access to the Kinesis stream, if required. +<4> The Amazon Web Services (AWS) region. + +. Save your `config.yaml` file and restart your {productname} deployment. diff --git a/modules/proc_manage-log-storage-splunk.adoc b/modules/proc_manage-log-storage-splunk.adoc new file mode 100644 index 000000000..41031634d --- /dev/null +++ b/modules/proc_manage-log-storage-splunk.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="proc_manage-log-storage-splunk"] += Configuring action log storage for Splunk + +link:https://www.splunk.com/[Splunk] is an alternative to Elasticsearch that can provide log analyses for your {productname} data. + +Enabling Splunk logging can be done during {productname} deployment or post-deployment using the configuration tool. 
Configuration includes both the option to forward action logs directly to Splunk or to the Splunk HTTP Event Collector (HEC). + +Use the following procedures to enable Splunk for your {productname} deployment. \ No newline at end of file diff --git a/modules/proc_manage-log-storage.adoc b/modules/proc_manage-log-storage.adoc index a507f90ce..9cb37c456 100644 --- a/modules/proc_manage-log-storage.adoc +++ b/modules/proc_manage-log-storage.adoc @@ -1,60 +1,4 @@ -[[proc_manage-log-storage]] -= Configuring action log storage for Elasticsearch +[id="proc_manage-log-storage"] += Configuring action log storage for Elasticsearch and Splunk -By default, the past three months of usage logs are stored in the {productname} database -and exposed via the web UI on organization and repository levels. Appropriate administrative -privileges are required to see log entries. For deployments with a large amount of logged operations, you can now store -the usage logs in Elasticsearch instead of the {productname} database backend. -To do this, you need to provide your own Elasticsearch stack, as it is not included with -{productname} as a customizable component. - -Enabling Elasticsearch logging can be done during {productname} deployment -or post-deployment using the {productname} Config Tool. The resulting -configuration is stored in the `config.yaml` file. -Once configured, usage log access continues to be provided the same way, via the web UI -for repositories and organizations. - -Here's how to configure action log storage to change it from the default -{productname} database to use Elasticsearch: - -. Obtain an Elasticsearch account. -. Open the {productname} Config Tool (either during or after {productname} deployment). -. Scroll to the _Action Log Storage Configuration_ setting and select -_Elasticsearch_ instead of _Database_. 
The following figure shows the Elasticsearch settings -that appear: -+ -image:elasticsearch_action_logs.png[Choose Elasticsearch to view settings to store logs] - -. Fill in the following information for your Elasticsearch instance: -+ -* **Elasticsearch hostname**: The hostname or IP address of the system providing -the Elasticsearch service. -* **Elasticsearch port**: The port number providing the Elasticsearch service on the host -you just entered. Note that the port must be accessible from all systems -running the {productname} registry. The default is TCP port 9200. -* **Elasticsearch access key**: The access key needed to gain access to the Elastic search -service, if required. -* **Elasticsearch secret key**: The secret key needed to gain access to the Elastic search -service, if required. -* **AWS region**: If you are running on AWS, set the AWS region (otherwise, leave it blank). -* **Index prefix**: Choose a prefix to attach to log entries. -* **Logs Producer**: Choose either Elasticsearch (default) or Kinesis to direct logs to -an intermediate Kinesis stream on AWS. You need to set up your own pipeline to -send logs from Kinesis to Elasticsearch (for example, Logstash). The following figure -shows additional fields you would need to fill in for Kinesis: -+ -image:kinesis_producer.png[On AWS optionally set up an intermediate Kinesis stream] - -. If you chose Elasticsearch as the Logs Producer, no further configuration is needed. -If you chose Kinesis, fill in the following: -+ -* **Stream name**: The name of the Kinesis stream. -* **AWS access key**: The name of the AWS access key needed to gain access to the Kinesis stream, if required. -* **AWS secret key**: The name of the AWS secret key needed to gain access to the Kinesis stream, if required. -* **AWS region**: The AWS region. - -. When you are done, save the configuration. The Config Tool checks your settings. 
-If there is a problem connecting to the Elasticsearch or Kinesis services, -you will see an error and have the opportunity to continue editing. Otherwise, -logging will begin to be directed to your Elasticsearch configuration after the -cluster restarts with the new configuration. +By default, usage logs are stored in the {productname} database and exposed through the web UI on organization and repository levels. Appropriate administrative privileges are required to see log entries. For deployments with a large amount of logged operations, you can store the usage logs in Elasticsearch and Splunk instead of the {productname} database backend. \ No newline at end of file diff --git a/modules/proc_manage-upgrade-quay-guide.adoc b/modules/proc_manage-upgrade-quay-guide.adoc index 657882881..e36ba1411 100644 --- a/modules/proc_manage-upgrade-quay-guide.adoc +++ b/modules/proc_manage-upgrade-quay-guide.adoc @@ -73,7 +73,7 @@ should be consulted before each {productname} upgrade. [[check-the-health-of-the-upgraded-container]] == Check the health of the upgraded container -Visit the /health/endtoend endpoint on the registry hostname and verify +Visit the `/health/endtoend` endpoint on the registry hostname and verify that the code is 200 and `is_testing` is false. [[upgrade-all-containers-in-the-cluster]] diff --git a/modules/proc_setting-up-quay-for-qbo.adoc b/modules/proc_setting-up-quay-for-qbo.adoc index 10f88c90d..d67688471 100644 --- a/modules/proc_setting-up-quay-for-qbo.adoc +++ b/modules/proc_setting-up-quay-for-qbo.adoc @@ -1,5 +1,5 @@ :_content-type: PROCEDURE -[[setting-up-quay-for-qbo]] +[id="setting-up-quay-for-qbo"] = Setting up {productname} for the {qbo} In this procedure, you will create a dedicated {productname} organization, and from a new application created within that organization you will generate an OAuth token to be used with the {qbo} in {ocp}. 
@@ -36,5 +36,5 @@ In this procedure, you will create a dedicated {productname} organization, and f + [IMPORTANT] ==== -As of {productname} 3.7, there is no token management. You cannot list tokens, delete tokens, or modify tokens. The generated access token is only shown once and cannot be re-obtained after closing the page. +{productname} does not offer token management. You cannot list tokens, delete tokens, or modify tokens. The generated access token is only shown once and cannot be re-obtained after closing the page. ==== diff --git a/modules/proc_splunk-action-log.adoc b/modules/proc_splunk-action-log.adoc new file mode 100644 index 000000000..3e6c6e655 --- /dev/null +++ b/modules/proc_splunk-action-log.adoc @@ -0,0 +1,57 @@ +:_content-type: PROCEDURE +[id="proc_splunk-action-log"] += Creating an action log + +Use the following procedure to create a user account that can forward action logs to Splunk. + +[IMPORTANT] +==== +You must use the Splunk UI to view {productname} action logs. At this time, viewing Splunk action logs on the {productname} *Usage Logs* page is unsupported, and returns the following message: `Method not implemented. Splunk does not support log lookups`. +==== + +.Prerequisites + +* You have installed Splunk and created a username. +* You have generated a Splunk bearer token. +* You have configured your {productname} `config.yaml` file to enable Splunk. + +.Procedure + +. Log in to your {productname} deployment. + +. Click on the name of the organization that you will use to create an action log for Splunk. + +. In the navigation pane, click *Robot Accounts* -> *Create Robot Account*. + +. When prompted, enter a name for the robot account, for example `splunkrobotaccount`, then click *Create robot account*. + +. On your browser, open the Splunk UI. + +. Click *Search and Reporting*. + +. In the search bar, enter the name of your index, for example, `<index_name>`, and press *Enter*. ++ +The search results populate on the Splunk UI.
Logs are forwarded in JSON format. A response might look similar to the following: ++ +[source,json] +---- +{ + "log_data": { + "kind": "authentication", <1> + "account": "quayuser123", <2> + "performer": "John Doe", <3> + "repository": "projectQuay", <4> + "ip": "192.168.1.100", <5> + "metadata_json": {...}, <6> + "datetime": "2024-02-06T12:30:45Z" <7> + } +} + +---- +<1> Specifies the type of log event. In this example, `authentication` indicates that the log entry relates to an authentication event. +<2> The user account involved in the event. +<3> The individual who performed the action. +<4> The repository associated with the event. +<5> The IP address from which the action was performed. +<6> Might contain additional metadata related to the event. +<7> The timestamp of when the event occurred. \ No newline at end of file diff --git a/modules/proc_splunk-config.adoc b/modules/proc_splunk-config.adoc new file mode 100644 index 000000000..d6a90d44e --- /dev/null +++ b/modules/proc_splunk-config.adoc @@ -0,0 +1,100 @@ +:_content-type: PROCEDURE +[id="proc_splunk-config"] += Configuring {productname} to use Splunk + +Use the following procedure to configure {productname} to use Splunk or the Splunk HTTP Event Collector (HEC). + +.Prerequisites + +* You have installed Splunk and created a username. +* You have generated a Splunk bearer token. + +.Procedure + +. Configure {productname} to use Splunk or the Splunk HTTP Event Collector (HEC). + +.. If opting to use Splunk, open your {productname} `config.yaml` file and add the following configuration fields: ++ +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: http://.remote.csb <1> + port: 8089 <2> + bearer_token: <3> + url_scheme: <4> + verify_ssl: False <5> + index_prefix: <6> + ssl_ca_path: <7> +# ... +---- +<1> String. The Splunk cluster endpoint. +<2> Integer. The Splunk management cluster endpoint port. Differs from the Splunk GUI hosted port. 
Can be found on the Splunk UI under *Settings* -> *Server Settings* -> *General Settings*. +<3> String. The generated bearer token for Splunk. +<4> String. The URL scheme for accessing the Splunk service. If Splunk is configured to use TLS/SSL, this must be `https`. +<5> Boolean. Whether to enable TLS/SSL. Defaults to `true`. +<6> String. The Splunk index prefix. Can be a new, or used, index. Can be created from the Splunk UI. +<7> String. The relative container path to a single `.pem` file containing a certificate authority (CA) for TLS/SSL validation. + +.. If opting to use Splunk HEC, open your {productname} `config.yaml` file and add the following configuration fields: ++ +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk_hec <1> + splunk_hec_config: <2> + host: prd-p-aaaaaq.splunkcloud.com <3> + port: 8088 <4> + hec_token: 12345678-1234-1234-1234-1234567890ab <5> + url_scheme: https <6> + verify_ssl: False <7> + index: quay <8> + splunk_host: quay-dev <9> + splunk_sourcetype: quay_logs <10> +# ... +---- +<1> Specify `splunk_hec` when configuring Splunk HEC. +<2> Logs model configuration for Splunk HTTP Event Collector (HEC) action logs. +<3> The Splunk cluster endpoint. +<4> Splunk management cluster endpoint port. +<5> HEC token for Splunk. +<6> The URL scheme for accessing the Splunk service. If Splunk is behind SSL/TLS, must be `https`. +<7> Boolean. Enable (true) or disable (false) SSL/TLS verification for HTTPS connections. +<8> The Splunk index to use. +<9> The host name to log this event. +<10> The name of the Splunk `sourcetype` to use. + +. If you are configuring `ssl_ca_path`, you must configure the SSL/TLS certificate so that {productname} will trust it. + +.. If you are using a standalone deployment of {productname}, SSL/TLS certificates can be provided by placing the certificate file inside of the `extra_ca_certs` directory, or inside of the relative container path and specified by `ssl_ca_path`. + +..
If you are using the {productname} Operator, create a config bundle secret, including the certificate authority (CA) of the Splunk server. For example: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config_390.yaml --from-file extra_ca_cert_splunkserver.crt=./splunkserver.crt config-bundle-secret +---- ++ +Specify the `conf/stack/extra_ca_certs/splunkserver.crt` file in your `config.yaml`. For example: ++ +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: ec2-12-345-67-891.us-east-2.compute.amazonaws.com + port: 8089 + bearer_token: eyJra + url_scheme: https + verify_ssl: true + index_prefix: quay123456 + ssl_ca_path: conf/stack/splunkserver.crt +# ... +---- \ No newline at end of file diff --git a/modules/proc_upgrade_standalone.adoc b/modules/proc_upgrade_standalone.adoc index b25fdd740..b1b1a12cb 100644 --- a/modules/proc_upgrade_standalone.adoc +++ b/modules/proc_upgrade_standalone.adoc @@ -1,30 +1,49 @@ +:_content-type: PROCEDURE +[id="standalone-upgrade"] = Standalone upgrade -In general, {productname} supports upgrades from a prior (N-1) minor version only. For example, upgrading directly from {productname} 3.0.5 to the latest version of 3.5 is not supported. Instead, users would have to upgrade as follows: +In general, {productname} supports single-step upgrades from prior (N-2, N-3) minor versions. This helps simplify the upgrade procedure for customers on older releases. The following upgrade paths are supported for {productname} {productmin}: -. 3.0.5 -> 3.1.3 -. 3.1.3 -> 3.2.2 -. 3.2.2 -> 3.3.4 -. 3.3.4 -> 3.4.z -. 3.4.z -> 3.5.z +* 3.11.z -> {productmin} +* 3.12.z -> {productmin} +* 3.13.z -> {productmin} -This is required to ensure that any necessary database migrations are done correctly and in the right order during the upgrade. +Upgrading to {productmin} from releases older than those listed above is unsupported.
This helps ensure that any necessary database migrations are done correctly and in the right order during the upgrade. -In some cases, {productname} supports direct, single-step upgrades from prior (N-2, N-3) minor versions. This exception to the normal, prior minor version-only, upgrade simplifies the upgrade procedure for customers on older releases. The following upgrade paths are supported: +For users wanting to upgrade the {productname} Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrading_quay_by_upgrading_the_quay_operator[Upgrading the {productname} Operator Overview]. -. 3.3.z -> 3.6.z -. 3.4.z -> 3.6.z -. 3.4.z -> 3.7.z -. 3.5.z -> 3.7.z -. 3.7.z -> 3.8.z +This document describes the steps needed to perform each individual upgrade. Determine your current version and then follow the steps in sequential order, starting with your current version and working up to your desired target version. +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_14_z_from_3_13_z[Upgrade to 3.14.z from 3.13.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_14_z_from_3_12_z[Upgrade to 3.14.z from 3.12.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_14_z_from_3_11_z[Upgrade to 3.14.z from 3.11.z] -For users wanting to upgrade via the Quay Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrading_quay_by_upgrading_the_quay_operator[Upgrading Quay by upgrading the Quay Operator]. 
+//// +//3.13 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_13_z_from_3_12_z[Upgrade to 3.13.z from 3.12.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_13_z_from_3_11_z[Upgrade to 3.13.z from 3.11.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_13_z_from_3_10_z[Upgrade to 3.13.z from 3.10.z] -This document describes the steps needed to perform each individual upgrade. Determine your current version and then follow the steps in sequential order, starting with your current version and working up to your desired target version. +//3.12 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_12_z_from_3_11_z[Upgrade to 3.12.z from 3.11.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_12_z_from_3_10_z[Upgrade to 3.12.z from 3.10.z] + + +//3.11 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_11_z_from_3_10_z[Upgrade to 3.11.z from 3.10.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_11_z_from_3_9_z[Upgrade to 3.11.z from 3.9.z] -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_6_z[Upgrade to 3.8.z from 3.7.z] + +//3.10 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_10_z_from_3_9_z[Upgrade to 3.10.z from 3.9.z] +* 
link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_10_z_from_3_8_z[Upgrade to 3.10.z from 3.8.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_10_z_from_3_7_z[Upgrade to 3.10.z from 3.7.z] + +//3.9 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_9_z_from_3_8_z[Upgrade to 3.9.z from 3.8.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_9_z_from_3_7_z[Upgrade to 3.9.z from 3.7.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_8_z_from_3_7_z[Upgrade to 3.8.z from 3.7.z] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_6_z[Upgrade to 3.7.z from 3.6.z] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_5_z[Upgrade to 3.7.z from 3.5.z] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_4_z[Upgrade to 3.7.z from 3.4.z] @@ -40,27 +59,142 @@ ifdef::downstream[] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_1_3_from_3_0_z[Upgrade to 3.1.3 from 3.0.5] * link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_0_5_from_2_9_5[Upgrade to 3.0.5 from 2.9.5] endif::downstream[] +//// -See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes/index[{productname} Release Notes] for information on features for 
individual releases. +See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/index[{productname} Release Notes] for information on features for individual releases. The general procedure for a manual upgrade consists of the following steps: -. Stop the Quay and Clair containers. +. Stop the `Quay` and `Clair` containers. . Backup the database and image storage (optional but recommended). . Start Clair using the new version of the image. -. Wait until Clair is ready to accept connections before starting the new version of Quay. - +. Wait until Clair is ready to accept connections before starting the new version of {productname}. +[id="accessing-images"] == Accessing images -Images for Quay 3.4.0 and later are available from link:https://registry.redhat.io[registry.redhat.io] and +{productname} image from version 3.4.0 and later are available from link:https://registry.redhat.io[registry.redhat.io] and link:https://registry.access.redhat.com[registry.access.redhat.com], with authentication set up as described in link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]. -Images for Quay 3.3.4 and earlier are available from link:https://quay.io[quay.io], with authentication set up as described in link:https://access.redhat.com/solutions/3533201[Accessing {productname} without a CoreOS login]. +//// +[id="upgrading-clair-postgresql-database"] +== Upgrading the Clair PostgreSQL database + +If you are upgrading {productname} to version {productmin}, you must migrate your Clair PostgreSQL database version from PostgreSQL version 13 -> version 15. This requires bringing down your Clair PostgreSQL 13 database and running a migration script to initiate the process. -== Upgrade to 3.8.z from 3.7.z +Use the following procedure to upgrade your Clair PostgreSQL database from version 13 -> to version 15. 
+ +[IMPORTANT] +==== +Clair security scans might become temporarily disrupted after the migration procedure has succeeded. +==== + +.Procedure + +. Stop the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- -=== Target images +. Stop the Clair container by running the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Run the following Podman process from SCLOrg's link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration] procedure, which allows for data migration from a remote PostgreSQL server: ++ +[source,terminal] +---- +$ sudo podman run -d --name <1> + -e POSTGRESQL_MIGRATION_REMOTE_HOST= \ <2> + -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \ + -v \ <3> + [ OPTIONAL_CONFIGURATION_VARIABLES ] + registry.redhat.io/rhel8/postgresql-15 +---- ++ +<1> Insert a name for your Clair PostgreSQL 15 migration database. +<2> Your new Clair PostgreSQL 15 database container IP address. Can obtained by running the following command: `sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay`. +<3> You must specify a different volume mount point than the one from your initial Clair PostgreSQL 13 deployment, and modify the access control lists for said directory. For example: ++ +[source,terminal] +---- +$ mkdir -p /host/data/clair-postgresql15-directory +---- ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /host/data/clair-postgresql15-directory +---- ++ +This prevents data from being overwritten by the new container. + +. Stop the Clair PostgreSQL 13 container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. 
After completing the PostgreSQL migration, run the Clair PostgreSQL 15 container, using the new data volume mount from Step 3, for example, ``: ++ +[source,terminal] +---- +$ sudo podman run -d --rm --name \ + -e POSTGRESQL_USER= \ + -e POSTGRESQL_PASSWORD= \ + -e POSTGRESQL_DATABASE= \ + -e POSTGRESQL_ADMIN_PASSWORD= \ + -p 5433:5432 \ + -v \ + registry.redhat.io/rhel8/postgresql-15 +---- + +. Start the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 --name=quay \ +-v /home//quay-poc/config:/conf/stack:Z \ +-v /home//quay-poc/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- + +. Start the Clair container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +registry.redhat.io/quay/clair-rhel8:{productminv} +---- + +For more information, see link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration]. 
+//// + +== Upgrade to 3.14.z from 3.13.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.14.z from 3.12.z + +=== Target images * **Quay:** {productrepo}/{quayimage}:{productminv} ifdef::downstream[] * **Clair:** {productrepo}/{clairimage}:{productminv} @@ -68,8 +202,305 @@ endif::downstream[] ifdef::upstream[] * **Clair:** {productrepo}/{clairimage}:{clairproductminv} endif::upstream[] -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.14.z from 3.11.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +//// +== Upgrade to 3.13.z from 3.12.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.13.z from 3.11.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** 
{productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.13.z from 3.10.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + + +== Upgrade to 3.12.z from 3.11.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.12.z from 3.10.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + + +== Upgrade to 3.11.z from 3.10.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.11.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.11.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.11.z from 3.9.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.11.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}::v3.11.0 
+endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + + +== Upgrade to 3.10.z from 3.9.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.10.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.10.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.10.z from 3.8.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.10.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.10.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.10.z from 3.7.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.10.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.10.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + + +== Upgrade to 3.9.z from 3.8.z + +If you are upgrading your standalone {productname} deployment from 3.8.z -> 3.9, it is highly recommended that you upgrade PostgreSQL from version 10 -> 13. To upgrade PostgreSQL from 10 -> 13, you must bring down your PostgreSQL 10 database and run a migration script to initiate the process. + +Use the following procedure to upgrade PostgreSQL from 10 -> 13 on a standalone {productname} deployment. + +.Procedure + +. Enter the following command to scale down the {productname} container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Optional. If you are using Clair, enter the following command to stop the Clair container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. 
Run the Podman process from SCLOrg's link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration] procedure, which allows for data migration from a remote PostgreSQL server: ++ +[source,terminal] +---- +$ sudo podman run -d --name <1> + -e POSTGRESQL_MIGRATION_REMOTE_HOST=172.17.0.2 \ <2> + -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \ + -v <3> + [ OPTIONAL_CONFIGURATION_VARIABLES ] + rhel8/postgresql-13 +---- ++ +<1> The name of your PostgreSQL 13 migration database. +<2> Your current {productname} PostgreSQL 13 database container IP address. Can obtained by running the following command: `sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay`. +<3> You must specify a different volume mount point than the one from your initial PostgreSQL 10 deployment, and modify the access control lists for said directory. For example: ++ +[source,terminal] +---- +$ mkdir -p /host/data/directory +---- ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /host/data/directory +---- ++ +This prevents data from being overwritten by the new container. + +. Optional. If you are using Clair, repeat the previous step for the Clair PostgreSQL database container. + +. Stop the PostgreSQL 10 container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. After completing the PostgreSQL migration, run the PostgreSQL 13 container, using the new data volume mount from Step 3, for example, ``: ++ +[source,terminal] +---- +$ sudo podman run -d --rm --name postgresql-quay \ + -e POSTGRESQL_USER= \ + -e POSTGRESQL_PASSWORD= \ + -e POSTGRESQL_DATABASE= \ + -e POSTGRESQL_ADMIN_PASSWORD= \ + -p 5432:5432 \ + -v \ + registry.redhat.io/rhel8/postgresql-13:1-109 +---- + +. Optional. If you are using Clair, repeat the previous step for the Clair PostgreSQL database container. + +. 
Start the {productname} container: ++ +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 --name=quay \ +-v /home//quay-poc/config:/conf/stack:Z \ +-v /home//quay-poc/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- + +. Optional. Restart the Clair container, for example: ++ +[source,terminal] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +registry.redhat.io/quay/clair-rhel8:v3.9.0 +---- + +For more information, see link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration]. + +//updating target images is where you left + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.9.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.9.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.9.z from 3.7.z + +If you are upgrading your standalone {productname} deployment from 3.7.z -> 3.9, it is highly recommended that you upgrade PostgreSQL from version 10 -> 13. To upgrade PostgreSQL from 10 -> 13, you must bring down your PostgreSQL 10 database and run a migration script to initiate the process: + +[NOTE] +==== +* When upgrading from {productname} 3.7 to 3.9, you might receive the following error: `pg_dumpall: error: query failed: ERROR: xlog flush request 1/B446CCD8 is not satisfied --- flushed only to 1/B0013858`. As a workaround to this issue, you can delete the `quayregistry-clair-postgres-upgrade` job on your {ocp} deployment, which should resolve the issue. 
+==== + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.9.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.9.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.8.z from 3.7.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.8.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.8.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} == Upgrade to 3.7.z from 3.6.z @@ -81,8 +512,8 @@ endif::downstream[] ifdef::upstream[] * **Clair:** {productrepo}/{clairimage}:{clairproductminv} endif::upstream[] -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} == Upgrade to 3.7.z from 3.5.z @@ -94,8 +525,8 @@ endif::downstream[] ifdef::upstream[] * **Clair:** {productrepo}/{clairimage}:{clairproductminv} endif::upstream[] -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} == Upgrade to 3.7.z from 3.4.z @@ -107,8 +538,8 @@ endif::downstream[] ifdef::upstream[] * **Clair:** {productrepo}/{clairimage}:{clairproductminv} endif::upstream[] -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} == Upgrade to 3.7.z from 3.3.z @@ -124,11 +555,11 @@ endif::downstream[] ifdef::upstream[] * **Clair:** {productrepo}/{clairimage}:{clairproductminv} endif::upstream[] -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** {postgresimage} +* 
**Redis:** {redisimage} == Upgrade to 3.6.z from 3.4.z -+ + [NOTE] ==== {productname} 3.6 supports direct, single-step upgrade from 3.4.z. This exception to the normal, prior minor version-only, upgrade simplifies the upgrade procedure for customers on older releases. @@ -146,11 +577,11 @@ endif::downstream[] ifdef::upstream[] * **Clair:** {productrepo}/{clairimage}:{clairproductminv} endif::upstream[] -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} == Upgrade to 3.6.z from 3.3.z -+ + [NOTE] ==== {productname} 3.6 supports direct, single-step upgrade from 3.3.z. This exception to the normal, prior minor version-only, upgrade simplifies the upgrade procedure for customers on older releases. @@ -168,8 +599,8 @@ endif::downstream[] ifdef::upstream[] * **Clair:** {productrepo}/{clairimage}:{clairproductminv} endif::upstream[] -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} === Swift configuration when upgrading from 3.3.z to 3.6 @@ -198,15 +629,15 @@ When upgrading from {productname} 3.3.z to 3.6.z, some users might receive the f ifdef::downstream[] * **Quay:** {productrepo}/{quayimage}:v3.5.7 * **Clair:** {productrepo}/{clairimage}:{productminv} -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 endif::downstream[] ifdef::upstream[] * **Quay:** {productrepo}/{quayimage}:v3.5.1 * **Clair:** {productrepo}/{clairimage}:{clairproductminv} -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** 
registry.redhat.io/rhel8/redis-6:1-110 endif::upstream[] @@ -218,8 +649,8 @@ Upgrading to Quay 3.4 requires a database migration which does not support downg === Target images * **Quay:** {productrepo}/{quayimage}:v3.4.6 * **Clair:** {productrepo}/{clairimage}:{productminv} -* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-10:1 -* **Redis:** registry.redhat.io/rhel8/redis-5:1 +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110 == Upgrade to 3.3.4 from 3.2.z @@ -298,3 +729,4 @@ include::con_upgrade_v3.adoc[leveloffset=+2] include::proc_upgrade_v3.adoc[leveloffset=+2] endif::downstream[] +//// \ No newline at end of file diff --git a/modules/proc_use-api.adoc b/modules/proc_use-api.adoc index 8ef72cefc..7d3ecd9a8 100644 --- a/modules/proc_use-api.adoc +++ b/modules/proc_use-api.adoc @@ -1,23 +1,8 @@ -= Using the {productname} API +:_content-type: CONCEPT [id="using-the-api"] += Using the {productname} API -// Module included in the following assemblies: // -// - -{productname} provides a full link:https://oauth.net/2/[OAuth 2], RESTful API that: - -* Is available from endpoints of each {productname} instance from the URL -https:///api/v1 -* Lets you connect to endpoints, via a browser, to get, delete, post, and put {productname} settings -by enabling the Swagger UI -* Can be accessed by applications that make API calls and use OAuth tokens -* Sends and receives data as JSON - -The following text describes how to access the {productname} API and -use it to view and modify setting in your {productname} cluster. -The next section lists and describes API endpoints. - +[id="accessing-quay-io-api"] == Accessing the Quay API from Quay.io If you don't have your own {productname} cluster running yet, you can explore the {productname} API available from Quay.io @@ -47,25 +32,8 @@ of the endpoint. 
Open an endpoint, enter any required parameters (such as a repository name or image), then select the `Try it out!` button to query or change settings associated with a Quay.io endpoint. -== Create OAuth access token - -To create an OAuth access token so you can access the API for your organization: - -. Log in to {productname} and select your Organization (or create a new one). - -. Select the Applications icon from the left navigation. - -. Select Create New Application and give the new application a name when prompted. - -. Select the new application. - -. Select Generate Token from the left navigation. -. Select the checkboxes to set the scope of the token and select Generate Access Token. -. Review the permissions you are allowing and select Authorize Application to approve it. - -. Copy the newly generated token to use to access the API. == Accessing your Quay API from a web browser @@ -94,11 +62,7 @@ add the following line to the `config.yaml` on all nodes in the cluster and rest BROWSER_API_CALLS_XHR_ONLY: false ``` -== Accessing the {productname} API from the command line - -You can use the `curl` command to GET, PUT, POST, or DELETE settings via the API -for your {productname} cluster. Replace `` with the OAuth access token you -created earlier to get or change settings in the following examples. 
+//// === Get superuser information @@ -156,7 +120,7 @@ $ curl -H "Content-Type: application/json" -H "Authorization: Bearer Fava2kV9C9 ** The returned content includes a generated password for the new user account: + [source,json] ----- +---- { "username": "quaysuper", "email": "quaysuper@example.com", @@ -389,7 +353,7 @@ $ curl -X GET -k -H "Authorization: Bearer qz9NZ2Np1f55CSZ3RVOvxjeUdkzYuCp0pKggA To enable directory synchronization for the team `newteam` in organization `testadminorg`, where the corresponding group name in LDAP is `ldapgroup`: ``` -$ curl -X POST -H "Authorization: Bearer 9rJYBR3v3pXcj5XqIA2XX6Thkwk4gld4TCYLLWDF" \ +$ curl -X POST -H "Authorization: Bearer 9rJYBR3v3pXcj5XqIA2XX6Thkwk4gld4TCYLLWDF" \ -H "Content-type: application/json" \ -d '{"group_dn": "cn=ldapgroup,ou=Users"}' \ http://quay1-server:8080/api/v1/organization/testadminorg/team/newteam/syncing @@ -461,6 +425,28 @@ print(r.text) $ curl -X POST https://quay.io/api/v1/repository \ -H 'Authorization: Bearer {token}' \ -H 'Content-Type: application/json' \ - -d '{"namespace":"yournamespace", "repository":"yourreponame", + -d '{"namespace":"yournamespace", "repository":"yourreponame", "description":"descriptionofyourrepo", "visibility": "private"}' | jq ``` + +[id="api-create-mirrored-repo"] +=== Create a mirrored repository + +.Minimal configuration +[source,terminal] +---- +curl -X POST + -H "Authorization: Bearer ${bearer_token}" + -H "Content-Type: application/json" + --data '{"external_reference": "quay.io/minio/mc", "external_registry_username": "", "sync_interval": 600, "sync_start_date": "2021-08-06T11:11:39Z", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": [ "latest" ]}, "robot_username": "orga+robot"}' https://${quay_registry}/api/v1/repository/${orga}/${repo}/mirror | jq +---- + +.Extended configuration +[source,terminal] +---- +$ curl -X POST + -H "Authorization: Bearer ${bearer_token}" + -H "Content-Type: application/json" + --data '{"is_enabled": true, 
"external_reference": "quay.io/minio/mc", "external_registry_username": "username", "external_registry_password": "password", "external_registry_config": {"unsigned_images":true, "verify_tls": false, "proxy": {"http_proxy": "http://proxy.tld", "https_proxy": "https://proxy.tld", "no_proxy": "domain"}}, "sync_interval": 600, "sync_start_date": "2021-08-06T11:11:39Z", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": [ "*" ]}, "robot_username": "orga+robot"}' https://${quay_registry}/api/v1/repository/${orga}/${repo}/mirror | jq +---- +//// \ No newline at end of file diff --git a/modules/proc_use-quay-build-dockerfiles.adoc b/modules/proc_use-quay-build-dockerfiles.adoc index 6796b07d2..1185af712 100644 --- a/modules/proc_use-quay-build-dockerfiles.adoc +++ b/modules/proc_use-quay-build-dockerfiles.adoc @@ -1,85 +1,87 @@ -= Building Dockerfiles +:_content-type: CONCEPT +[id="building-dockerfiles"] += Building container images -{productname} supports the ability to build -http://docs.docker.com/reference/builder/[Dockerfiles] on our build -fleet and push the resulting image to the repository. +Building container images involves creating a blueprint for a containerized application. Blueprints rely on base images from other public repositories that define how the application should be installed and configured. -[[viewing-and-managing-builds]] -== Viewing and managing builds - -Repository Builds can be viewed and managed by clicking the Builds tab -in the `Repository View`. - -[[manually-starting-a-build]] -== Manually starting a build - -To manually start a repository build, click the `+` icon in the top -right of the header on any repository page and choose `New Dockerfile -Build`. An uploaded `Dockerfile`, `.tar.gz`, or an HTTP URL to either -can be used for the build. +ifeval::["{context}" == "quay-io"] [NOTE] ==== -You will not be able to specify the -Docker build context when manually starting a build. 
+Because blueprints rely on images from other public repositories, they might be subject to rate limiting. Consequently, your build _could_ fail. ==== +endif::[] + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +ifeval::["{context}" == "quay-builders-image-automation"] +{productname} +endif::[] +supports the ability to build Docker and Podman container images. This functionality is valuable for developers and organizations who rely on container and container orchestration. + +ifeval::["{context}" == "quay-io"] +On {quayio}, this feature works the same across both free, and paid, tier plans. -[[build-triggers]] -== Build Triggers - -Repository builds can also be automatically triggered by events such as -a push to an SCM (GitHub, BitBucket or GitLab) or via -link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/#webhook[a call to a webhook]. - -[[creating-a-new-build-trigger]] -=== Creating a new build trigger - -To setup a build trigger, click the `Create Build Trigger` button on the -Builds view page and follow the instructions of the dialog. You will -need to grant {productname} access to your repositories in order to setup the -trigger and your account _requires admin access on the SCM repository_. - -[[manually-triggering-a-build-trigger]] -=== Manually triggering a build trigger - -To trigger a build trigger manually, click the icon next to the build -trigger and choose `Run Now`. - -[[build-contexts]] -=== Build Contexts - -When building an image with Docker, a directory is specified to become -the build context. This holds true for both manual builds and build -triggers because the builds conducted by {productname} are no different from -running `docker build` on your own machine. 
- -{productname} build contexts are -always the specified _subdirectory_ from the build setup and fallback to -the root of the build source if none is specified. When a build is -triggered, {productname} build workers clone the git repository to the worker -machine and enter the build context before conducting a build. - -For builds based on tar archives, build workers extract the archive and -enter the build context. For example: - -``` +[NOTE] +==== +{quayio} limits the number of simultaneous builds that a single user can submit at one time. +==== +endif::[] + +[id="build-contexts"] +== Build contexts + +When building an image with Docker or Podman, a directory is specified to become the _build context_. This is true for both manual Builds and Build triggers, because the Build that is created by +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +is not different than running `docker build` or `podman build` on your local machine. + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +Build contexts are always specified in the _subdirectory_ from the Build setup, and fallback to the root of the Build source if a directory is not specified. + +When a build is triggered, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +Build workers clone the Git repository to the worker machine, and then enter the Build context before conducting a Build. + +For Builds based on `.tar` archives, Build workers extract the archive and enter the Build context. For example: + +.Extracted Build archive +[source,terminal] +---- example ├── .git ├── Dockerfile ├── file └── subdir └── Dockerfile -``` +---- -Imagine the example above is the directory structure for a GitHub -repository called "example". 
If no subdirectory is specified in the -build trigger setup or while manually starting a build, the build will -operate in the example directory. +Imagine that the _Extracted Build archive_ is the directory structure for a GitHub repository called *example*. If no subdirectory is specified in the Build trigger setup, or when manually starting the Build, the Build operates in the example directory. -If `subdir` is specified to be the -subdirectory in the build trigger setup, only the Dockerfile within it -is visible to the build. This means that you cannot use the `ADD` -command in the Dockerfile to add `file`, because it is outside of the -build context. +If a subdirectory is specified in the Build trigger setup, for example, `subdir`, only the Dockerfile within it is visible to the Build. This means that you cannot use the `ADD` command in the Dockerfile to add `file`, because it is outside of the Build context. -Unlike the Docker Hub, the Dockerfile is part of the build context on -{productname}. Thus, it must not appear in the `.dockerignore` file. +Unlike Docker Hub, the Dockerfile is part of the Build context on +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +As a result, it must not appear in the `.dockerignore` file. \ No newline at end of file diff --git a/modules/proc_use-quay-build-workers-dockerfiles.adoc b/modules/proc_use-quay-build-workers-dockerfiles.adoc index eeb3a67f1..f593f298e 100644 --- a/modules/proc_use-quay-build-workers-dockerfiles.adoc +++ b/modules/proc_use-quay-build-workers-dockerfiles.adoc @@ -1,410 +1,11 @@ -[[build-support]] -= Automatically building Dockerfiles with Build workers +:_content-type: CONCEPT +[id="bare-metal-builds"] += Bare metal builds with {productname-ocp} -{productname} supports building Dockerfiles using a set of worker nodes on OpenShift or Kubernetes. 
Build triggers, such as GitHub webhooks can be configured to automatically build new versions of your repositories when new code is committed. This document will walk you through enabling builds with your {productname} installation and setting up one or more OpenShift/K8s clusters to accept builds from {productname}. -With {productname} 3.4, the underlying Build Manager has been completely re-written as part of {productname}'s migration from Python 2 to Python 3. As a result, builder nodes are now dynamically created as Kubernetes Jobs versus builder nodes that ran continuously in {productname} 3.3 and earlier. This greatly simplifies how {productname} manages builds and provides the same mechanism quay.io utilizes to handle thousands of container image builds daily. Customers who are currently running static (“Enterprise” builders under {productname} 3.3) will be required to migrate to a Kubernetes-based build mechanism. +ifeval::["{context}" == "use-quay"] +Documentation for the _builds_ feature has been moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/builders_and_image_automation/index[Builders and image automation]. This chapter will be removed in a future version of {productname}. +endif::[] -[[architecture-overview]] -== Architecture Overview -The {productname} Build system is designed for scalability (since it is used to host all builds at quay.io). The Build Manager component of {productname} provides an orchestration layer that tracks build requests and ensures that a Build Executor (OpenShift/K8s cluster) will carry out each request. Each build is handled by a Kubernetes Job which launches a small virtual machine to completely isolate and contain the image build process. This ensures that container builds do not affect each other or the underlying build system. Multiple Executors can be configured to ensure that builds are performed even in the event of infrastructure failures. 
{productname} will automatically send builds to a different Executor if it detects that one Executor is having difficulties. - -ifdef::downstream[] -[NOTE] -==== -The upstream version of {productname} provides instructions on how to configure an AWS/EC2 based Executor. This configuration is not supported for {productname} customers. -==== -endif::downstream[] - -=== Build manager -The build manager is responsible for the lifecycle of scheduled build. Operations requiring updating the build queue, build phase and running jobs’ status is handled by the build manager. - -=== Build workers’ control plane -Build jobs are run on separate worker nodes, and are scheduled on separate control planes (executor). Currently, {productname} supports running jobs on AWS and Kubernetes. Builds are executed using quay.io/quay/quay-builder. On AWS, builds are scheduled on EC2 instances. On k8s, the builds are scheduled as job resources. - -=== Orchestrator -The orchestrator is used to store the state of currently running build jobs, and publish events for the build manager to consume. e.g expiry events. Currently, the supported orchestrator backend is Redis. - - -[[openshift-requirements]] -== OpenShift Requirements -{productname} builds are supported on Kubernetes and OpenShift 4.5 and higher. A bare metal (non-virtualized) worker node is required since build pods require the ability to run kvm virtualization. Each build is done in an ephemeral virtual machine to ensure complete isolation and security while the build is running. In addition, your OpenShift cluster should permit the ServiceAccount associated with {productname} builds to run with the necessary SecurityContextConstraint to support privileged containers. - - -[[orchestrator-requirements]] -== Orchestrator Requirements -The {productname} builds need access to a Redis instance to track build status information. It is acceptable to use the same Redis instance already deployed with your {productname} installation. 
All build queues are managed in the {productname} database so there is no need for a highly available Redis instance. - - - -[[setting-up-builders]] -== Setting Up {productname} Builders With OpenShift - -=== OpenShift TLS component - -The `tls` component allows you to control TLS configuration. - -[NOTE] -==== -{productname} 3.7 does not support builders when the TLS component is managed by the Operator. -==== - -If you set `tls` to `unmanaged`, you supply your own `ssl.cert` and `ssl.key` files. In this instance, if you want your cluster to support builders, you must add both the Quay route and the builder route name to the SAN list in the cert, or alternatively use a wildcard. To add the builder route, use the following format: - -[source,bash] ----- -[quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name] ----- - - - - -=== Prepare OpenShift for {productname} Builds -There are several actions that are needed on an OpenShift cluster before it can accept builds from {productname}. - -. Create a project where builds will be run (e.g. ‘builder’) -+ -``` -$ oc new-project builder -``` -+ -. Create a `ServiceAccount` in this `Project` that will be used to run builds. Ensure that it has sufficient privileges to create `Jobs` and `Pods`. Copy the `ServiceAccount`’s token for use later. -+ -``` -$ oc create sa -n builder quay-builder -$ oc policy add-role-to-user -n builder edit system:serviceaccount:builder:quay-builder -$ oc sa get-token -n builder quay-builder -``` -+ -. Identify the URL for the OpenShift cluster’s API server. This can be found from the OpenShift Console. -. Identify a worker node label to be used when scheduling build `Jobs`. Because build pods need to run on bare metal worker nodes, typically these are identified with specific labels. Check with your cluster administrator to determine exactly which node label should be used. -. 
If the cluster is using a self-signed certificate, get the kube apiserver’s CA to add to {productname}’s extra certs. -.. Get the name of the secret containing the CA: -+ -``` -$ oc get sa openshift-apiserver-sa --namespace=openshift-apiserver -o json | jq '.secrets[] | select(.name | contains("openshift-apiserver-sa-token"))'.name -``` -+ -.. Get the `ca.crt` key value from the secret in the Openshift console. The value should begin with “-----BEGIN CERTIFICATE-----” -.. Import the CA in {productname} using the ConfigTool. Ensure the name of this file matches `K8S_API_TLS_CA`. -. Create the necessary security contexts/role bindings for the `ServiceAccount`: -[source,yaml] ----- -apiVersion: security.openshift.io/v1 -kind: SecurityContextConstraints -metadata: - name: quay-builder -priority: null -readOnlyRootFilesystem: false -requiredDropCapabilities: null -runAsUser: - type: RunAsAny -seLinuxContext: - type: RunAsAny -seccompProfiles: -- '*' -supplementalGroups: - type: RunAsAny -volumes: -- '*' -allowHostDirVolumePlugin: true -allowHostIPC: true -allowHostNetwork: true -allowHostPID: true -allowHostPorts: true -allowPrivilegeEscalation: true -allowPrivilegedContainer: true -allowedCapabilities: -- '*' -allowedUnsafeSysctls: -- '*' -defaultAddCapabilities: null -fsGroup: - type: RunAsAny ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: quay-builder-scc - namespace: builder -rules: -- apiGroups: - - security.openshift.io - resourceNames: - - quay-builder - resources: - - securitycontextconstraints - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: quay-builder-scc - namespace: builder -subjects: -- kind: ServiceAccount - name: quay-builder -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: quay-builder-scc ----- - -=== Enable Builders and add Build Configuration to {productname}’s Configuration Bundle - -. 
Ensure that you’ve got Builds enabled in your {productname} configuration. -[source,yaml] ----- -FEATURE_BUILD_SUPPORT: True ----- -. Add the following to your {productname} configuration bundle, replacing each value with a value specific to your installation. - -[NOTE] -==== -Currently only the Build feature itself can be enabled via the {productname} Config Tool. The actual configuration of the Build Manager and Executors must be done manually in the config.yaml file. -==== -[source,yaml] ----- -BUILD_MANAGER: -- ephemeral -- ALLOWED_WORKER_COUNT: 1 - ORCHESTRATOR_PREFIX: buildman/production/ - ORCHESTRATOR: - REDIS_HOST: quay-redis-host - REDIS_PASSWORD: quay-redis-password - REDIS_SSL: true - REDIS_SKIP_KEYSPACE_EVENT_SETUP: false - EXECUTORS: - - EXECUTOR: kubernetes - BUILDER_NAMESPACE: builder - K8S_API_SERVER: api.openshift.somehost.org:6443 - K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build_cluster.crt - VOLUME_SIZE: 8G - KUBERNETES_DISTRIBUTION: openshift - CONTAINER_MEMORY_LIMITS: 5120Mi - CONTAINER_CPU_LIMITS: 1000m - CONTAINER_MEMORY_REQUEST: 3968Mi - CONTAINER_CPU_REQUEST: 500m - NODE_SELECTOR_LABEL_KEY: beta.kubernetes.io/instance-type - NODE_SELECTOR_LABEL_VALUE: n1-standard-4 - CONTAINER_RUNTIME: podman - SERVICE_ACCOUNT_NAME: ***** - SERVICE_ACCOUNT_TOKEN: ***** - QUAY_USERNAME: quay-username - QUAY_PASSWORD: quay-password - WORKER_IMAGE: /quay-quay-builder - WORKER_TAG: some_tag - BUILDER_VM_CONTAINER_IMAGE: /quay-quay-builder-qemu-rhcos:v3.4.0 - SETUP_TIME: 180 - MINIMUM_RETRY_THRESHOLD: - SSH_AUTHORIZED_KEYS: - - ssh-rsa 12345 someuser@email.com - - ssh-rsa 67890 someuser2@email.com ----- - -Each configuration field is explained below. - -ALLOWED_WORKER_COUNT:: Defines how many Build Workers are instantiated per {productname} Pod. Typically this is ‘1’. -ORCHESTRATOR_PREFIX:: Defines a unique prefix to be added to all Redis keys (useful to isolate Orchestrator values from other Redis keys). -REDIS_HOST:: Hostname for your Redis service. 
-REDIS_PASSWORD:: Password to authenticate into your Redis service. -REDIS_SSL:: Defines whether or not your Redis connection uses SSL. -REDIS_SKIP_KEYSPACE_EVENT_SETUP:: By default, {productname} does not set up the keyspace events required for key events at runtime. To do so, set REDIS_SKIP_KEYSPACE_EVENT_SETUP to `false`. -EXECUTOR:: Starts a definition of an Executor of this type. Valid values are ‘kubernetes’ and ‘ec2’ -BUILDER_NAMESPACE:: Kubernetes namespace where {productname} builds will take place -K8S_API_SERVER:: Hostname for API Server of OpenShift cluster where builds will take place -K8S_API_TLS_CA:: The filepath in the `Quay` container of the build cluster's CA certificate for the Quay app to trust when making API calls. -KUBERNETES_DISTRIBUTION:: Indicates which type of Kubernetes is being used. Valid values are ‘openshift’ and ‘k8s’. -CONTAINER_*:: Define the resource requests and limits for each build pod. -NODE_SELECTOR_*:: Defines the node selector label name/value pair where build Pods should be scheduled. -CONTAINER_RUNTIME:: Specifies whether the builder should run `docker` or `podman`. Customers using Red Hat’s `quay-builder` image should set this to `podman`. -SERVICE_ACCOUNT_NAME/SERVICE_ACCOUNT_TOKEN:: Defines the Service Account name/token that will be used by build Pods. -QUAY_USERNAME/QUAY_PASSWORD:: Defines the registry credentials needed to pull the {productname} build worker image that is specified in the WORKER_IMAGE field. -ifdef::upstream[] -This is useful if pulling a non-public quay-builder image from quay.io. -endif::upstream[] -ifdef::downstream[] -Customers should provide a Red Hat Service Account credential as defined in the section "Creating Registry Service Accounts" against registry.redhat.io in the article at https://access.redhat.com/RegistryAuthentication. -endif::downstream[] -WORKER_IMAGE:: Image reference for the {productname} builder image. 
-ifdef::upstream[] -quay.io/quay/quay-builder -endif::upstream[] -ifdef::downstream[] -registry.redhat.io/quay/quay-builder -endif::downstream[] -WORKER_TAG:: Tag for the builder image desired. -ifdef::upstream[] -Typically this is latest. -endif::upstream[] -ifdef::downstream[] -The latest version is v3.4.0. -endif::downstream[] -BUILDER_VM_CONTAINER_IMAGE:: The full reference to the container image holding the internal VM needed to run each {productname} build -ifdef::upstream[] -(`quay.io/quay/quay-builder-qemu-fedoracoreos:latest`). -endif::upstream[] -ifdef::downstream[] -(`registry.redhat.io/quay/quay-builder-qemu-rhcos:v3.4.0`). -endif::downstream[] -SETUP_TIME:: Specifies the number of seconds at which a build times out if it has not yet registered itself with the Build Manager (default is 500 seconds). Builds that time out are attempted to be restarted three times. If the build does not register itself after three attempts it is considered failed. -MINIMUM_RETRY_THRESHOLD:: This setting is used with multiple Executors; it indicates how many retries are attempted to start a build before a different Executor is chosen. Setting to 0 means there are no restrictions on how many tries the build job needs to have. This value should be kept intentionally small (three or less) to ensure failovers happen quickly in the event of infrastructure failures. -E.g Kubernetes is set as the first executor and EC2 as the second executor. If we want the last attempt to run a job to always be executed on EC2 and not Kubernetes, we would set the Kubernetes executor’s `MINIMUM_RETRY_THRESHOLD` to 1 and EC2’s `MINIMUM_RETRY_THRESHOLD` to 0 (defaults to 0 if not set). -In this case, kubernetes’ `MINIMUM_RETRY_THRESHOLD` > retries_remaining(1) would evaluate to False, thus falling back to the second executor configured -SSH_AUTHORIZED_KEYS:: List of ssh keys to bootstrap in the ignition config. 
This allows other keys to be used to ssh into the EC2 instance or QEMU VM - -ifdef::upstream[] -== Setting Up {productname} Builders with AWS -In addition to OpenShift, {productname} can also be configured to use AWS EC2 instances as build worker nodes. This is useful for situations where you may want to have EC2 based builds available as a backup solution in the event your OpenShift build workers are overloaded or unavailable. - -The setup steps are identical to OpenShift based builds with the following changes in your {productname} configuration bundle. - -[source,yaml] ----- - EXECUTORS: - - EXECUTOR: ec2 - QUAY_USERNAME: quayusertopullworker - QUAY_PASSWORD: quaypass - WORKER_IMAGE: quay.io/quay/quay-builder - WORKER_TAG: latest - EC2_REGION: us-east-1 - COREOS_AMI: ami-02545325b519192df # Fedora CoreOS - AWS_ACCESS_KEY: ***** - AWS_SECRET_KEY: ***** - EC2_INSTANCE_TYPE: t2.large - EC2_VPC_SUBNET_ID: subnet-somesubnet - EC2_SECURITY_GROUP_IDS: - - sg-somesg - EC2_KEY_NAME: Some key - BLOCK_DEVICE_SIZE: 58 - SSH_AUTHORIZED_KEYS: - - ssh-rsa 12345 someuser@email.com - - ssh-rsa 67890 someuser2@email.com ----- - -COREOS_AMI:: Specifies an AMI name where builds will be run. Unlike the OpenShift based builds, these container builds are done directly within an ephemeral EC2 instance. This AMI must utilize ignition and contain a docker. The AMI shown in this example is used by quay.io for its build system. - -[NOTE] -==== -AWS builds are not supported by Red Hat and are currently provided as an upstream feature only. -==== - -endif::upstream[] - -== OpenShift Routes Limitation - -[NOTE] -==== -This section only applies if you are using the Quay Operator on OpenShift with managed `route` component. -==== - -Due to a limitation of OpenShift `Routes` to only be able to serve traffic to a single port, additional steps are required to set up builds. 
Ensure that your `kubectl` or `oc` CLI tool is configured to work with the cluster where the Quay Operator is installed and that your `QuayRegistry` exists (not necessarily the same as the bare metal cluster where your builders run). - -* Ensure that HTTP/2 ingress is enabled on the OpenShift cluster by following link:https://docs.openshift.com/container-platform/4.5/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress[these steps]. - -* The Quay Operator will create a `Route` which directs gRPC traffic to the build manager server running inside the existing Quay pod(s). If you want to use a custom hostname (such as a subdomain like `builder.registry.example.com`), ensure that you create a CNAME record with your DNS provider which points to the `status.ingress[0].host` of the created `Route`: -+ ----- -$ kubectl get -n route -quay-builder -o jsonpath={.status.ingress[0].host} ----- - -* Using the OpenShift UI or CLI, update the `Secret` referenced by `spec.configBundleSecret` of the `QuayRegistry` with the build cluster CA certificate (name the key `extra_ca_cert_build_cluster.cert`), and update the `config.yaml` entry with the correct values referenced in the builder config above (depending on your build executor) along with the `BUILDMAN_HOSTNAME` field: -+ -[source,yaml] ----- -BUILDMAN_HOSTNAME: -BUILD_MANAGER: -- ephemeral -- ALLOWED_WORKER_COUNT: 1 - ORCHESTRATOR_PREFIX: buildman/production/ - JOB_REGISTRATION_TIMEOUT: 600 - ORCHESTRATOR: - REDIS_HOST: quay-redis-host - REDIS_PASSWORD: quay-redis-password - REDIS_SSL: true - REDIS_SKIP_KEYSPACE_EVENT_SETUP: false - EXECUTORS: - - EXECUTOR: kubernetes - BUILDER_NAMESPACE: builder - ... ----- - -The extra configuration field is explained below: - -BUILDMAN_HOSTNAME:: The externally accessible server hostname which the build jobs use to communicate back to the build manager. Default is the same as `SERVER_HOSTNAME`. 
For OpenShift `Route`, it is either `status.ingress[0].host` or the CNAME entry if using a custom hostname. `BUILDMAN_HOSTNAME` **needs** to include the port number, e.g `somehost:443` for Openshift Route, as the gRPC client used to communicate with the build manager does not infer any port if omitted. - -== Troubleshooting Builds -The builder instances started by the build manager are ephemeral. This means that they will either get shut down by {productname}} on timeouts/failure or garbage collected by the control plane (EC2/K8s). This means that in order to get the builder logs, one needs to do so **while** the builds are running. - -=== DEBUG config flag -A DEBUG flag can be set in order to prevent the builder instances from getting cleaned up after completion/failure. To do so, in the desired executor configuration, set DEBUG to true. For example: - -[source,yaml] ----- - EXECUTORS: - - EXECUTOR: ec2 - DEBUG: true - ... - - EXECUTOR: kubernetes - DEBUG: true - ... ----- - -When set to true, DEBUG will prevent the build nodes from shutting down after the quay-builder service is done or fails, and will prevent the build manager from cleaning up the instances (terminating EC2 instances or deleting k8s jobs). -This will allow debugging builder node issues, and **should not** be set in a production environment. The lifetime service will still exist. i.e The instance will still shutdown after approximately 2 hours (EC2 instances will terminate, k8s jobs will complete) -Setting DEBUG will also affect ALLOWED_WORKER_COUNT, as the unterminated instances/jobs will still count towards the total number of running workers. This means the existing builder workers will need to manually be deleted if ALLOWED_WORKER_COUNT is reached to be able to schedule new builds. - -Use the followings steps: - -ifdef::upstream[] -=== EC2 -. Start a build in {productname} -. In the EC2 console, identify the instance started for the build. 
Build instances are named “Quay Ephemeral Builder” with Tag {“BuildUUID”: } -. Using the SSH key set by EC2_KEY_NAME, login to the builder instance with: -+ -``` -$ ssh -i /path/to/ssh/key/in/ec2/or/config/id_rsa core@ -``` -+ -. Get the quay-builder service logs: -+ -``` -$ systemctl status quay-builder -$ journalctl -f -u quay-builder -``` - - -=== OpenShift/K8S -endif::upstream[] - -. The guest VM forwards its SSH port (22) to its host’s (the pod) port 2222. Port forward the builder pod’s port 2222 to a port on localhost. e.g -+ -``` -$ kubectl port-forward 9999:2222 -``` -+ -. SSH into the VM running inside the container using a key set from SSH_AUTHORIZED_KEYS: -+ -``` -$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost -``` -+ -. Get the quay-builder service logs: -+ -``` -$ systemctl status quay-builder -$ journalctl -f -u quay-builder -``` -+ -* Step 2-3 can also be done in a single SSH command: -+ -``` -$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost ‘systemctl status quay-builder’ -$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost ‘journalctl -f -u quay-builder’ -``` - - -[[set-up-github-build]] -== Setting up GitHub builds (optional) -If your organization plans to have builds be conducted via pushes to GitHub -(or GitHub Enterprise), continue with _Creating an OAuth application in GitHub_. +ifeval::["{context}" == "quay-builders-image-automation"] +The procedures in this section explain how to create an environment for _bare metal builds_ for {productname-ocp}. +endif::[] diff --git a/modules/proc_use-quay-create-repo.adoc b/modules/proc_use-quay-create-repo.adoc index 16b6f3cdf..ff28066bb 100644 --- a/modules/proc_use-quay-create-repo.adoc +++ b/modules/proc_use-quay-create-repo.adoc @@ -1,77 +1,53 @@ -[[use-quay-create-repo]] -= Creating a repository -A repository provides a central location for storing a related set of container images. 
There are two ways to create a repository in {productname}: via a push (from `docker` or `podman`) and via the {productname} UI. These are essentially the same, whether you are using Quay.io or your own instance of {productname}. -[[creating-an-image-repository-via-the-ui]] -== Creating an image repository via the UI +// module included in the following assemblies: -To create a repository in the {productname} UI under a user account: -. Log in to the user account through the web UI. -. Click the + icon in the top right of the header on the home page (or -other page related to the user) and choose New Repository, as shown in -the following figure: -+ -image:repo-create.png[Create a new repository for a user.] +// * use_quay/master.adoc +// * quay_io/master.adoc -. On the Create New Repository page that appears +:_content-type: CONCEPT +[id="use-quay-create-repo"] += {productname} repository overview - * Add the new repository name to your user name +A repository provides a central location for storing a related set of container images. These images can be used to build applications along with their dependencies in a standardized format. - * Click Repository Description and type a description of the repository +Repositories are organized by namespaces. Each namespace can have multiple repositories. For example, you might have a namespace for your personal projects, one for your company, or one for a specific team within your organization. - * In Repository Visibility, select whether you want the repository to be public or private +ifeval::["{context}" == "quay-io"] +With a paid plan, {quayio} provides users with access controls for their repositories. Users can make a repository public, meaning that anyone can pull, or download, the images from it, or users can make it private, restricting access to authorized users or teams. +endif::[] - * Click the Create Repository button. 
+ifeval::["{context}" == "use-quay"] +{productname} provides users with access controls for their repositories. Users can make a repository public, meaning that anyone can pull, or download, the images from it, or users can make it private, restricting access to authorized users or teams. +endif::[] -The new repository is created, starting out empty. A docker pull command you -could use to pull an image from this repository (minus the image name) appears -on the screen. +ifeval::["{context}" == "quay-security"] +Private repositories provide control over the users that have access to your images by allowing you to define users or teams who can push to, or pull from, the repository, thereby enhancing the security of your registry. +endif::[] -To create a repository in the {productname} UI under an organization: - -. Log in as a user that has Admin or Write permission to the organization. -. From the Repositories view, select the organization name from the right -column under Users and Organizations. The page for the organization appears, similar to the page shown in Figure 2.x: -. Click +Create New Repository in the upper-right part of the page. -. On the Create New Repository page that appears: - * Add the new repository name to the organization name - * Click Repository Description and type a description of the repository - * In Repository Visibility, select whether you want the repository to be public or private - * Click the Create Repository button. - -The new repository is created, starting out empty. A docker pull command you could use to -pull an image from this repository (minus the image name) appears on the screen. - - -[[creating-an-image-repository-via-docker]] -== Creating an image repository via docker or podman +ifeval::["{context}" == "quay-io"] +[NOTE] +==== +The free tier of {quayio} does not allow for private repositories. You must upgrade to a paid tier of {quayio} to create a private repository. 
For more information, see "Information about {quayio} pricing". +==== +endif::[] -Assuming you have the proper credentials, pushing an image to a repository -that does not yet exist in your {productname} instance will create that -repository as it pushes the image to that repository. Either the `docker` or -`podman` commands will work for these examples. +ifeval::["{context}" == "quay-io"] +There are two ways to create a repository in {quayio}: by pushing an image with the relevant `podman` command, or by using the {quayio} UI. You can also use the UI to delete a repository. +endif::[] +ifeval::["{context}" == "use-quay"] +There are multiple ways to create a repository in {productname}. The following options are available depending on your use case: -. Tag the image: With an image available from `docker` or `podman` on your local -system, tag that image with the new repository name and image name. Here are -examples for pushing images to Quay.io or your own {productname} setup (for -example, reg.example.com). For the examples, replace namespace with your -{productname} user name or organization and repo_name with the name of the -repository you want to create: -+ -``` -# sudo podman tag myubi-minimal quay.io/namespace/repo_name -# sudo podman tag myubi-standard reg.example.com/namespace/repo_name -``` +* You can push an image with the relevant `podman` or `docker` command. +* You can use the {productname} UI. +* You can use the {productname} API. +* For OCI artifacts, for example, a large-language model (LLM) or machine learning application, you can use `skopeo` or `oras` to copy the artifact to your repository. +endif::[] -. Push to the appropriate registry. 
For example: -+ -``` -# sudo podman push quay.io/namespace/repo_name -# sudo podman push reg.example.com/namespace/repo_name -``` +ifeval::["{context}" == "quay-io"] +If you push an image through the command-line interface (CLI) without first creating a repository on the UI, the created repository is set to *Private*, regardless of the plan you have. [NOTE] ==== -To create an application repository, follow the same procedure you did -for creating a container image repository. +It is recommended that you create a repository on the {quayio} UI before pushing an image. {quayio} checks the plan status and does not allow creation of a private repository if a plan is not active. ==== +endif::[] \ No newline at end of file diff --git a/modules/proc_use-quay-git-trigger.adoc b/modules/proc_use-quay-git-trigger.adoc index 0c2bd0eae..9db54dba6 100644 --- a/modules/proc_use-quay-git-trigger.adoc +++ b/modules/proc_use-quay-git-trigger.adoc @@ -1,55 +1,70 @@ -= Setting up a Custom Git Trigger +[id="setting-up-custom-git-trigger"] += Setting up a custom Git trigger -A Custom Git Trigger is a generic way for any git server to act as a -build trigger. It relies solely on SSH keys and webhook endpoints; everything else -is left to the user to implement. +After you have created a _custom Git trigger_, two additional steps are required: -[[creating-a-custom-git-trigger]] -== Creating a Trigger +. You must provide read access to the SSH public key that is generated when creating the trigger. -Creating a Custom Git Trigger is similar to the creation of any other -trigger with a few subtle differences: +ifeval::["{context}" == "quay-io"] +. You must set up a webhook that POSTs to the {quayio} endpoint to trigger the build. +endif::[] +ifeval::["{context}" == "quay-builders-image-automation"] +. You must set up a webhook that POSTs to the {productname} endpoint to trigger the build. 
+endif::[] -* It is not possible for {productname} to automatically detect the proper -robot account to use with the trigger. This must be done manually in the -creation process. -* There are extra steps after the creation of the trigger that must be -done in order to use the trigger. These steps are detailed below. +These steps are only required if you are using a _custom Git trigger_. -[[post-git-trigger-creation-setup]] -== Post trigger-creation setup +[id="obtaining-build-trigger-credentials"] +== Obtaining build trigger credentials -Once a trigger has been created, *there are 2 additional steps required* -before the trigger can be used: +The SSH public key and Webhook Endpoint URL are available on the {productname} UI. -* Provide read access to the _SSH public key_ generated when creating -the trigger. -* Setup a _webhook_ that POSTs to the {productname} endpoint to trigger a -build. +.Prerequisites -The key and the URL are both available at all times by selecting `View -Credentials` from the gear located in the trigger listing. -image:view-credentials.png[View and modify tags from your repository] +* You have created a _custom Git trigger_. -[[ssh-public-key-access]] +.Procedure + +. On the *Builds* page of your repository, click the menu kebab for your _custom Git trigger_. + +. Click *View Credentials*. + +. Save the SSH Public Key and Webhook Endpoint URL. + +The key and the URL are available by selecting *View Credentials* from the *Settings*, or _gear_ icon. + +.View and modify tags from your repository +image:view-credentials.png[Trigger Credentials] + +[id="ssh-public-key-access"] === SSH public key access -Depending on the Git server setup, there are various ways to install the -SSH public key that {productname} generates for a custom git trigger. 
For -example, -https://git-scm.herokuapp.com/book/en/v2/Git-on-the-Server-Getting-Git-on-a-Server[Git documentation] describes a small server setup in which simply adding -the key to `$HOME/.ssh/authorize_keys` would provide access for builders -to clone the repository. For any git repository management software that -isn't officially supported, there is usually a location to input the key -often labeled as `Deploy Keys`. +Depending on the Git server configuration, there are multiple ways to install the SSH public key that +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +generates for a custom Git trigger. + +For example, documentation for link:https://git-scm.com/book/en/v2/Git-on-the-Server-Getting-Git-on-a-Server[Getting Git on a Server] describes how to set up a Git server on a Linux-based machine with a focus on managing repositories and access control through SSH. In this procedure, a small server is set up to add the keys to the `$HOME/.ssh/authorized_keys` file, which provides access for _builders_ to clone the repository. + +For any Git repository management software that is not officially supported, there is usually a location to input the key that is often labeled as *Deploy Keys*. -[[webhook]] +[id="webhook"] === Webhook +To automatically trigger a build, you must `POST` a `.json` payload to the webhook URL using the following format: -In order to automatically trigger a build, one must POST a JSON payload -to the webhook URL with the following format: +[NOTE] +==== +This request requires a `Content-Type` header containing +`application/json` in order to be valid. 
+==== -``` +.Example webhook +[source,terminal] +---- { "commit": "1c002dd", // required "ref": "refs/heads/master", // required @@ -70,15 +85,6 @@ to the webhook URL with the following format: } } } -``` - -[NOTE] -==== -This request requires a `Content-Type` header containing -`application/json` in order to be valid. -==== +---- -Once again, this can be accomplished in various ways depending on the -server setup, but for most cases can be done via a -https://git-scm.herokuapp.com/book/en/v2/Customizing-Git-Git-Hooks#idp26374144[post-receive -git hook]. +This can typically be accomplished with a link:https://git-scm.com/docs/githooks#post-receive[`post-receive` Git hook], however it does depend on your server setup. \ No newline at end of file diff --git a/modules/proc_use-quay-manage-repo.adoc b/modules/proc_use-quay-manage-repo.adoc index d9eb1c580..67fad9aff 100644 --- a/modules/proc_use-quay-manage-repo.adoc +++ b/modules/proc_use-quay-manage-repo.adoc @@ -1,179 +1,34 @@ -[[use-quay-manage-repo]] -= Managing access to repositories -As a {productname} user, you can create your own repositories and -make them accessible to other users on your {productname} instance. -As an alternative, you can create organizations to allow access to -repositories based on teams. In both user and organization repositories, -you can allow access to those repositories by creating credentials -associated with robot accounts. Robot accounts make it easy for a variety -of container clients (such as docker or podman) to access your repos, -without requiring that the client have a {productname} user account. - -[[allow-access-user-repo]] -== Allowing access to user repositories -When you create a repository in a user namespace, you can add access to -that repository to user accounts or through robot accounts. - -[[allow-user-access-user-repo]] -=== Allowing user access to a user repository - -To allow access to a repository associated with a user account, do the following: - -. 
Log into your {productname} user account. -. Select a repository under your user namespace to which you want to share access. -. Select the Settings icon from the left column. -. Type the name of the user to which you want to grant access to your repository. -The user name should appear as you type, as shown in the following figure: -+ -image:grant-user-access.png[Grant user access to a user repository] - -. In the permissions box, select one of the following: - * Read - Allows the user to view the repository and pull from it. - * Write - Allows the user to view the repository, as well as pull images from or push images to the repository. - * Admin - Allows all administrative settings to the repository, as well as all Read and Write permissions. - -. Select the Add Permission button. The user now has the assigned permission. - -To remove the user permissions to the repository, select the Options icon -to the right of the user entry, then select Delete Permission. - -[[allow-robot-access-user-repo]] -== Allowing robot access to a user repository -Robot accounts are used to set up automated access to the repositories in -your {productname} registry. They are similar to OpenShift service accounts. -When you set up a robot account, you: - -* Generate credentials that are associated with the robot account -* Identify repositories and images that the robot can push images to -or pull images from -* Copy and paste generated credentials to use with different container -clients (such as Docker, podman, Kubernetes, Mesos and others) to access -each defined repository - -Keep in mind that each robot account is limited to a single user namespace -or organization. So, for example, the robot could provide access to all -repositories accessible to a user jsmith, but not to any that are not in -the user’s list of repositories. - -The following procedure steps you through setting up a robot account to -allow access to your repositories. - -. 
Select Robot icon: From the Repositories view, select the Robot icon from the left column. -. Create Robot account: Select the Create Robot Account button. -. Set Robot name: Enter the name and description, then select the Create -robot account button. The robot name becomes a combination of your user name, -plus the robot name you set (for example, jsmith+myrobot) -. Add permission to the robot account: From the Add permissions screen for the -robot account, define the repositories you want the robot to access as follows: - * Put a check mark next to each repository the robot can access - * For each repository, select one of the following, and click Add permissions: - - None - Robot has no permission to the repository - - Read - Robot can view and pull from the repository - - Write - Robot can read (pull) from and write (push) to the repository - - Admin - Full access to pull from and push to the repository, plus the ability -to do administrative tasks associated with the repository - * Select the Add permissions button to apply the settings -. Get credentials to access repositories via the robot: Back on the Robot -Accounts page, select the Robot account name to see credential information for -that robot. -. Get the token: Select Robot Token, as shown in the following figure, to see -the token that was generated for the robot. If you want to reset the token, -select Regenerate Token. -+ -[NOTE] -==== -It is important to understand that regenerating a token makes any previous tokens for this robot invalid. -==== -+ -image:robot-gen-token.png[Select Options drop-down to change user passwords] - -. Get credentials: Once you are satisfied with the generated token, get the -resulting credentials in the following ways: - * Kubernetes Secret: Select this to download credentials in the form of a -Kubernetes pull secret yaml file. - * rkt Configuration: Select this to download credentials for the rkt -container runtime in the form of a json file. 
- * Docker Login: Select this to copy a full `docker login` command line that -includes the credentials. - * Docker Configuration: Select this to download a file to use as a Docker -config.json file, to permanently store the credentials on your client system. - * Mesos Credentials: Select this to download a tarball that provides the -credentials that can be identified in the uris field of a Mesos configuration -file. - -[[allow-access-org-repo]] -== Allowing access to organization repositories -Once you have created an organization, you can associate a set of repositories -directly to that organization. To add access to the repositories in that -organization, you can add Teams (sets of users with the same permissions) and -individual users. Essentially, an organization has the same ability to create -repositories and robot accounts as a user does, but an organization is intended -to set up shared repositories through groups of users (in teams or individually). - -Other things to know about organizations: - -* You cannot have an organization in another organization. To subdivide an -organization, you use teams. -* Organizations can’t contain users directly. You must first add a team, -then add one or more users to each team. -* Teams can be set up in organizations as just members who use the repos and -associated images or as administrators with special privileges for managing -the organization - -[[allow-team-access-org-repo]] -=== Adding a Team to an organization -When you create a team for your organization you can select the team name, -choose which repositories to make available to the team, and decide the -level of access to the team. - -. From the Organization view, select the Teams and Membership icon from the -left column. You will see that an owners Team exists with Admin privilege -for the user who created the Organization. -. Select Create New Team. You are prompted for the new team name to be -associated with the organization. 
Type the team name, which must start with -a lowercase letter, with the rest of the team name as any combination of -lowercase letters and numbers (no capitals or special characters allowed). -. Select the Create team button. The Add permissions window appears, -displaying a list of repositories in the organization. -. Check each repository you want the team to be able to access. Then -select one of the following permissions for each: - * Read - Team members are able to view and pull images - * Write - Team members can view, pull, and push images - * Admin - Team members have full read/write privilege, plus the ability to do administrative tasks related to the repository -. Select Add permissions to save the repository permissions for the team. - -[[set-team-role]] -=== Setting a Team role -After you have added a team, you can set the role of that team within the -organization. From the Teams and Membership screen within the organization, -select the TEAM ROLE drop-down menu, as shown in the following figure: - -image:set-team-role.png[Set the role that a team has within an organization] - -For the selected team, choose one of the following roles: - -* Member - Inherits all permissions set for the team -* Creator - All member permissions, plus the ability to create new repositories -* Admin - Full administrative access to the organization, including the ability to create teams, add members, and set permissions. - -[[add-users-to-team]] -=== Adding users to a Team -As someone with Admin privilege to an organization, you can add users and -robots to a team. When you add a user, it sends an email to that user. -The user remains pending until that user accepts the invitation. - -To add users or robots to a team, start from the organization’s screen and do the following: - -. Select the team you want to add users or robots to. -. 
In the Team Members box, type one of the following: - * A username from an account on the {productname} registry - * The email address for a user account on the registry - * The name of a robot account. The name must be in the form of orgname+robotname -. In the case of the robot account, it is immediately added to the team. For -a user account, an invitation to join is mailed to the user. Until the user -accepts that invitation, the user remains in the INVITED TO JOIN state. - -Next, the user accepts the email invitation to join the team. The next time the -user logs in to the {productname} instance, the user moves from the -INVITED TO JOIN list to the MEMBERS list for the organization. +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="use-quay-manage-repo"] += Access management for {productname} + +As a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +user, you can create your own repositories and make them accessible to other users that are part of your instance. Alternatively, you can create an organization and associate a set of repositories directly to that organization, referred to as an _organization repository_. + +Organization repositories differ from basic repositories in that the organization is intended to set up shared repositories through groups of users. In +ifeval::["{context}" == "quay-io"] +{quayio}, +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}, +endif::[] +groups of users can be either _Teams_, or sets of users with the same permissions, or _individual users_. You can also allow access to user repositories and organization repositories by creating credentials associated with Robot Accounts. 
Robot Accounts make it easy for a variety of container clients, such as Docker or Podman, to access your repositories without requiring that the client have a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +user account. \ No newline at end of file diff --git a/modules/proc_use-quay-notifications.adoc b/modules/proc_use-quay-notifications.adoc index 7421cd70b..7bf6d519c 100644 --- a/modules/proc_use-quay-notifications.adoc +++ b/modules/proc_use-quay-notifications.adoc @@ -1,358 +1,21 @@ -= Repository Notifications +// module included in the following assemblies: -Quay supports adding _notifications_ to a repository for various events -that occur in the repository's lifecycle. To add notifications, click -the *Settings* tab while viewing a repository and select -`Create Notification`. From the `When this event occurs` -field, select the items for which you want to receive notifications: +// * use_quay/master.adoc +// * quay_io/master.adoc -image:event-select.png[Create repository notifications] - -After selecting an event, further configure it by adding how you -will be notified of that event. - -[NOTE] -==== -Adding notifications requires _repository admin permission_. -==== - -The following are examples of repository events. - -[[repository-events]] -== Repository Events - -[[repository-push]] -=== Repository Push - -A successful push of one or more images was made to the repository: - -``` -{ - "name": "repository", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "homepage": "https://quay.io/repository/dgangaia/repository", - "updated_tags": [ - "latest" - ] -} -``` - -[[dockerfile-build-queued]] -=== Dockerfile Build Queued - -Here is a sample response for a Dockerfile build has been queued into the build system. -The response can differ based on the use of optional attributes. 
- -``` -{ - "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "repo": "test", - "trigger_metadata": { - "default_branch": "master", - "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", - "ref": "refs/heads/master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", - "date": "2019-03-06T12:48:24+11:00", - "message": "adding 5", - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - }, - "committer": { - "username": "web-flow", - "url": "https://github.com/web-flow", - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" - } - } - }, - "is_manual": false, - "manual_user": null, - "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2" -} -``` - -[[dockerfile-build-started]] -=== Dockerfile Build Started - -Here is an example of a Dockerfile build being started by the build system. -The response can differ based on some attributes being optional. 
- -``` -{ - "build_id": "a8cc247a-a662-4fee-8dcb-7d7e822b71ba", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "build_name": "50bc599", - "trigger_metadata": { //Optional - "commit": "50bc5996d4587fd4b2d8edc4af652d4cec293c42", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/50bc5996d4587fd4b2d8edc4af652d4cec293c42", - "date": "2019-03-06T14:10:14+11:00", - "message": "test build", - "committer": { //Optional - "username": "web-flow", - "url": "https://github.com/web-flow", //Optional - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional - }, - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/a8cc247a-a662-4fee-8dcb-7d7e822b71ba" -} -``` - -[[dockerfile-build-successfully-completed]] -=== Dockerfile Build Successfully Completed - -Here is a sample response of a Dockerfile build that has been successfully completed by the build system. +:_content-type: CONCEPT +[id="repository-notifications"] += Notifications overview +ifeval::["{context}" == "quay-io"] +{quayio} supports adding _notifications_ to a repository for various events that occur in the repository's lifecycle. +ifdef::upstream[] [NOTE] ==== -This event will occur *simultaneously* with a _Repository Push_ -event for the built image(s) +By default, vulnerability notifications are disabled on {quayio} and cannot be enabled. 
==== - -``` -{ - "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "build_name": "b7f7d2b", - "image_id": "sha256:0339f178f26ae24930e9ad32751d6839015109eabdf1c25b3b0f2abf8934f6cb", - "trigger_metadata": { - "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", - "date": "2019-03-06T12:48:24+11:00", - "message": "adding 5", - "committer": { //Optional - "username": "web-flow", - "url": "https://github.com/web-flow", //Optional - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional - }, - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2", - "manifest_digests": [ - "quay.io/dgangaia/test@sha256:2a7af5265344cc3704d5d47c4604b1efcbd227a7a6a6ff73d6e4e08a27fd7d99", - "quay.io/dgangaia/test@sha256:569e7db1a867069835e8e97d50c96eccafde65f08ea3e0d5debaf16e2545d9d1" - ] -} -``` - -[[dockerfile-build-failed]] -=== Dockerfile Build Failed - -A Dockerfile build has failed - -``` -{ - "build_id": "5346a21d-3434-4764-85be-5be1296f293c", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "docker_url": "quay.io/dgangaia/test", - "error_message": "Could not find or parse Dockerfile: unknown instruction: GIT", - "namespace": "dgangaia", - "trigger_id": 
"38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "build_name": "6ae9a86", - "trigger_metadata": { //Optional - "commit": "6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", - "date": "2019-03-06T14:18:16+11:00", - "message": "failed build test", - "committer": { //Optional - "username": "web-flow", - "url": "https://github.com/web-flow", //Optional - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional - }, - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/5346a21d-3434-4764-85be-5be1296f293c" -} - -``` - -[[dockerfile-build-cancelled]] -=== Dockerfile Build Cancelled - -A Dockerfile build was cancelled - -``` -{ - "build_id": "cbd534c5-f1c0-4816-b4e3-55446b851e70", - "trigger_kind": "github", - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", - "docker_tags": [ - "master", - "latest" - ], - "build_name": "cbce83c", - "trigger_metadata": { - "commit": "cbce83c04bfb59734fc42a83aab738704ba7ec41", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { - "url": "https://github.com/dgangaia/test/commit/cbce83c04bfb59734fc42a83aab738704ba7ec41", - "date": "2019-03-06T14:27:53+11:00", - "message": "testing cancel build", - "committer": { - "username": "web-flow", - "url": "https://github.com/web-flow", - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" - 
}, - "author": { - "username": "dgangaia", - "url": "https://github.com/dgangaia", - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/cbd534c5-f1c0-4816-b4e3-55446b851e70" -} -``` - -[[vulnerability-detected]] -=== Vulnerability Detected - - -A vulnerability was detected in the repository - -``` -{ - "repository": "dgangaia/repository", - "namespace": "dgangaia", - "name": "repository", - "docker_url": "quay.io/dgangaia/repository", - "homepage": "https://quay.io/repository/dgangaia/repository", - - "tags": ["latest", "othertag"], - - "vulnerability": { - "id": "CVE-1234-5678", - "description": "This is a bad vulnerability", - "link": "http://url/to/vuln/info", - "priority": "Critical", - "has_fix": true - } -} -``` - -[[notification-actions]] -== Notification Actions - - -[[quay-notification]] -=== Quay Notification - -A notification will be added to the Quay.io notification area. The -notification area can be found by clicking on the bell icon in the top -right of any Quay.io page. - -Quay.io notifications can be setup to be sent to a _User_, _Team_, or the _organization_ as a whole. - -[[e-mail]] -=== E-mail - -An e-mail will be sent to the specified address describing the event -that occurred. - -[NOTE] -==== -All e-mail addresses will have to be verified on a -_per-repository_ basis -==== - -[[webhook-post]] -=== Webhook POST - -An HTTP POST call will be made to the specified URL with the event's -data (see above for each event's data format). - -When the URL is HTTPS, the call will have an SSL client certificate set -from Quay.io. Verification of this certificate will prove the call -originated from Quay.io. Responses with status codes in the 2xx range -are considered successful. Responses with any other status codes will be -considered failures and result in a retry of the webhook notification. 
- -[[flowdock-notification]] -=== Flowdock Notification - - -Posts a message to Flowdock. - -[[hipchat-notification]] -=== Hipchat Notification - - -Posts a message to HipChat. - -[[slack-notification]] -=== Slack Notification - - -Posts a message to Slack. +endif::upstream[] +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} supports adding _notifications_ to a repository for various events that occur in the repository's lifecycle. +endif::[] diff --git a/modules/proc_use-quay-skip-trigger.adoc b/modules/proc_use-quay-skip-trigger.adoc deleted file mode 100644 index d9ae9e899..000000000 --- a/modules/proc_use-quay-skip-trigger.adoc +++ /dev/null @@ -1,6 +0,0 @@ -= Skipping a source control-triggered build -[[skipping-source-control-trigger-build]] - -To specify that a commit should be ignored by the {productname} build system, add -the text `[skip build]` or `[build skip]` anywhere in the commit -message. diff --git a/modules/proc_use-quay-tags.adoc b/modules/proc_use-quay-tags.adoc index a814e2af0..557b3b394 100644 --- a/modules/proc_use-quay-tags.adoc +++ b/modules/proc_use-quay-tags.adoc @@ -1,144 +1,195 @@ -= Working with tags +:_content-type: CONCEPT +[id="working-with-tags"] += Image tags overview -Tags provide a way to identify the version of an image, as well as -offering a means of naming the same image in different ways. -Besides an image's version, an image tag can identify its uses (such as devel, -testing, or prod) or the fact that it is the most recent version (latest). +An _image tag_ refers to a label or identifier assigned to a specific version or variant of a container image. Container images are typically composed of multiple layers that represent different parts of the image. Image tags are used to differentiate between different versions of an image or to provide additional information about the image. -From the *Tags* tab of an image repository, you can view, modify, add, move, delete, and -see the history of tags. 
You also can fetch command-lines you can use to -download (pull) a specific image (based on its name and tag) using different commands. +Image tags have the following benefits: -[[viewing-and-modifying-tags]] -== Viewing and modifying tags +* *Versioning and Releases*: Image tags allow you to denote different versions or releases of an application or software. For example, you might have an image tagged as _v1.0_ to represent the initial release and _v1.1_ for an updated version. This helps in maintaining a clear record of image versions. -The tags of a repository can be viewed and modified in the tags panel of -the repository page, found by clicking on the *Tags* tab. -image:tag-operations.png[View and modify tags from your repository] +* *Rollbacks and Testing*: If you encounter issues with a new image version, you can easily revert to a previous version by specifying its tag. This is helpful during debugging and testing phases. -[[adding-a-new-tag-to-a-tagged-image]] -=== Adding a new tag to a tagged image +* *Development Environments*: Image tags are beneficial when working with different environments. You might use a _dev_ tag for a development version, _qa_ for quality assurance testing, and _prod_ for production, each with their respective features and configurations. -A new tag can be added to a tagged image by clicking on the gear icon next to -the tag and choosing `Add New Tag`. {productname} will confirm the addition of -the new tag to the image. +* *Continuous Integration/Continuous Deployment (CI/CD)*: CI/CD pipelines often utilize image tags to automate the deployment process. New code changes can trigger the creation of a new image with a specific tag, enabling seamless updates. -[[moving-a-tag]] -=== Moving a tag +* *Feature Branches*: When multiple developers are working on different features or bug fixes, they can create distinct image tags for their changes. This helps in isolating and testing individual features. 
-Moving a tag to a different image is accomplished by performing the same -operation as adding a new tag, but giving an existing tag name. {productname} -will confirm that you want the tag moved, rather than added. +* *Customization*: You can use image tags to customize images with different configurations, dependencies, or optimizations, while keeping track of each variant. -[[deleting-a-tag]] -=== Deleting a tag +* *Security and Patching*: When security vulnerabilities are discovered, you can create patched versions of images with updated tags, ensuring that your systems are using the latest secure versions. -A specific tag and all its images can be deleted by clicking on the tag's gear icon -and choosing `Delete Tag`. This will delete the tag and any images unique -to it. Images will not be deleted until no tag references them either -directly or indirectly through a parent child relationship. +* *Dockerfile Changes*: If you modify the Dockerfile or build process, you can use image tags to differentiate between images built from the previous and updated Dockerfiles. -[[viewing-tag-history-and-going-back-in-time]] -=== Viewing tag history and going back in time +Overall, image tags provide a structured way to manage and organize container images, enabling efficient development, deployment, and maintenance workflows. -[[viewing-tag-history]] -==== Viewing tag history -To view the image history for a tag, click on the `View Tags History` menu -item located under the `Actions` menu. The page shown will display each -image to which the tag pointed in the past and when it pointed to that -image. +[id="viewing-tag-history"] +== Viewing tag history -[[going-back-in-time]] -==== Going back in time +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive history of images and their respective image tags. 
-To revert the tag to a previous image, find the history line where your -desired image was overwritten, and click on the Restore link. +.Procedure -[[fetching-images-and-tags]] -=== Fetching an image by tag or digest -From the *Tags* tab, you can view different ways of pulling images from the clients -that are ready to use those images. +* Navigate to the *Tag History* page of a repository to view the image tag history. -. Select a particular repository/image -. Select Tags in the left column -. Select the Fetch Tag icon for a particular image/tag combination -. When the Fetch Tag pop-up appears, select the Image format box to -see a drop-down menu that shows different ways that are -available to pull the image. The selections offer full command lines -for pulling a specific container image to the local system: +[id="going-back-in-time"] +== Reverting tag changes -image:image-fetch.png[Get commands for fetching images in different ways] +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive _time machine_ feature that allows older image tags to remain in the repository for set periods of time so that users can revert changes made to tags. This feature allows users to revert tag changes, like tag deletions. -You can select to pull a regular of an image by tag name or by digest name using the *docker* command. -. Choose the type of pull you want, then select `Copy Command`. -The full command-line is copied into your clipboard. -These two commands show a *docker pull* by tag and by digest: +.Procedure -``` -docker pull quay.io/cnegus/whatever:latest -docker pull quay.io/cnegus/whatever@sha256:e02231a6aa8ba7f5da3859a359f99d77e371cb47e643ce78e101958782581fb9 -``` +. Navigate to the *Tag History* page of a repository. -Paste the command into a command-line shell on a system that has the -*docker* command and service available, and press Enter. 
-At this point, the container image is ready to run on your local system. +. Find the point in the timeline at which image tags were changed or removed. Next, click the option under *Revert* to restore a tag to its image, or click the option under *Permanently Delete* to permanently delete the image tag. -On RHEL and Fedora systems, you can -substitute *podman* for *docker* to pull and run the selected image. +[id="fetching-images-and-tags"] +== Fetching an image by tag or digest -[[tag-expiration]] -== Tag Expiration +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers multiple ways of pulling images using Docker and Podman clients. + +.Procedure + +. Navigate to the *Tags* page of a repository. + +. Under *Manifest*, click the *Fetch Tag* icon. -Images can be set to expire from a {productname} repository at a chosen date and time using a feature called `tag expiration`. -Here are a few things to know about about tag expiration: +. When the popup box appears, users are presented with the following options: ++ +* Podman Pull (by tag) +* Docker Pull (by tag) +* Podman Pull (by digest) +* Docker Pull (by digest) ++ +Selecting any one of the four options returns a command for the respective client that allows users to pull the image. -* When a tag expires, the tag is deleted from the repository. If it is the last tag for a specific image, the image is set to be deleted. +. Click *Copy Command* to copy the command, which can be used on the command-line interface (CLI). For example: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman pull quay.io/quayadmin/busybox:test2 +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ podman pull quay-server.example.com/quayadmin/busybox:test2 +---- +endif::[] -* Expiration is set on a per-tag basis, not for a repository on the whole. 
+[id="tag-expiration"] +== Tag Expiration + +Images can be set to expire from a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +repository at a chosen date and time using the _tag expiration_ feature. This feature includes the following characteristics: -* When a tag expires or is deleted, it is not immediately removed from the registry. -The value of Time Machine (in User settings) defines when the deleted tag is actually removed -and garbage collected. By default, that value is 14 days. Up until that time, a tag can be repointed to an expired or deleted image. +* When an image tag expires, it is deleted from the repository. If it is the last tag for a specific image, the image is also set to be deleted. -* The {productname} superuser has no special privilege related to deleting expired images from user repositories. -There is no central mechanism for the superuser to gather information and act on user repositories. -It is up to the owners of each repository to manage expiration and ultimate deletion of their images. +* Expiration is set on a per-tag basis. It is not set for a repository as a whole. -Tag expiration can be set in different ways: +* After a tag is expired or deleted, it is not immediately removed from the registry. This is contingent upon the allotted time designed in the _time machine_ feature, which defines when the tag is permanently deleted, or garbage collected. By default, this value is set at _14 days_, however the administrator can adjust this time to one of multiple options. Up until the point that garbage collection occurs, tags changes can be reverted. -* By setting the `quay.expires-after=` LABEL in the Dockerfile when the image is created. -This sets a time to expire from when the image is built. +ifeval::["{context}" == "use-quay"] +The {productname} superuser has no special privilege related to deleting expired images from user repositories. 
There is no central mechanism for the superuser to gather information and act on user repositories. It is up to the owners of each repository to manage expiration and the deletion of their images. +endif::[] -* By choosing the expiration date from the EXPIRES column for the repository tag and selecting a specific date and time to expire. +Tag expiration can be set up in one of two ways: -The following figure shows the Options entry for changing tag expiration and the EXPIRES field for when the tag expires. -Hover over the EXPIRES field to see the expiration date and time that is currently set. +* By setting the `quay.expires-after=` LABEL in the Dockerfile when the image is created. This sets a time to expire from when the image is built. +* By selecting an expiration date on the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. For example: ++ image:tag-expires-ui.png[Change tag expiration under the Options icon or from the EXPIRES column] -=== Setting tag expiration from a Dockerfile +[id="setting-tag-from-dockerfile"] +== Setting tag expiration from a Dockerfile + +Adding a label, for example, `quay.expires-after=20h` by using the `docker label` command causes a tag to automatically expire after the time indicated. The following values for hours, days, or weeks are accepted: + +* `1h` +* `2d` +* `3w` + +Expiration begins from the time that the image is pushed to the registry. + +[id="setting-tag-expiration"] +== Setting tag expiration from the repository + +Tag expiration can be set on the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. + +.Procedure + +. Navigate to a repository and click *Tags* in the navigation pane. + +. Click the *Settings*, or _gear_ icon, for an image tag and select *Change Expiration*. + +. Select the date and time when prompted, and select *Change Expiration*. 
The tag is set to be deleted from the repository when the expiration time is reached. + +[id="security-scanning"] +== Viewing Clair security scans -Adding a label like `quay.expires-after=20h` via the Dockerfile LABEL command will cause a tag to automatically expire -after the time indicated. -The time values could be something like `1h`, `2d`, `3w` for hours, days, and weeks, respectively, from the time the image is built. +ifeval::["{context}" == "quay-io"] +{quayio} comes equipped with Clair security scanner. For more information about Clair on {quayio}, see "Clair security scanner." +endif::[] +ifeval::["{context}" == "use-quay"] +Clair security scanner is not enabled for {productname} by default. To enable Clair, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/testing-clair-with-quay[Clair on {productname}]. +endif::[] -=== Setting tag expiration from the repository +.Procedure -On the Repository Tag page there is a UI column titled *EXPIRES* that indicates when a tag will expire. -Users can set this by clicking on the time that it will expire or by clicking the Settings button (gear icon) on the right and choosing `Change Expiration`. +. Navigate to a repository and click *Tags* in the navigation pane. This page shows the results of the security scan. -Choose the date and time when prompted and select `Change Expiration`. -The tag will be set to be deleted from the repository when the expiration time is reached. +. To reveal more information about multi-architecture images, click *See Child Manifests* to see the list of manifests in extended view. -[[security-scanning]] -== Security scanning +. Click a relevant link under *See Child Manifests*, for example, *1 Unknown* to be redirected to the *Security Scanner* page. -By clicking the on the vulnerability or fixable count next to a tab you -can jump into the security scanning information for that tag. 
There you -can find which CVEs your image is susceptible to, and what remediation -options you may have available. +The *Security Scanner* page provides information for the tag, such as which CVEs the image is susceptible to, and what remediation options you might have available. -Keep in mind that image scanning only lists vulnerabilities found by the Clair image scanner. -What each user does about the vulnerabilities that are uncovered is completely up to that user. -The {productname} superuser does not act on those vulnerabilities found. +[NOTE] +==== +Image scanning only lists vulnerabilities found by Clair security scanner. What users do about the vulnerabilities that are uncovered is up to said user. +ifeval::["{context}" == "use-quay"] +{productname} superusers do not act on found vulnerabilities. +endif::[] +==== diff --git a/modules/proc_use-quay-view-export-logs.adoc b/modules/proc_use-quay-view-export-logs.adoc index 0d226243b..8eaf5e287 100644 --- a/modules/proc_use-quay-view-export-logs.adoc +++ b/modules/proc_use-quay-view-export-logs.adoc @@ -1,79 +1,55 @@ -[[use-quay-view-export-logs]] +:_content-type: PROCEDURE +[id="use-quay-view-export-logs"] = Viewing and exporting logs -Activity logs are gathered for all repositories and namespaces (users and -organizations) in {productname}. There are multiple ways of accessing -log files, including: +Activity logs are gathered for all repositories and namespaces in +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] -* Viewing logs through the web UI -* Exporting logs so they can be saved externally. -* Accessing log entries via the API +Viewing usage logs of +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +can provide valuable insights and benefits for both operational and security purposes. 
Usage logs might reveal the following information: -To access logs, you must have Admin privilege to the selected repository -or namespace. +//// +* *Monitoring and Performance Analysis*: Usage logs can help you monitor the performance of your container registry. By analyzing logs, you can identify patterns in usage, peak times, and potential bottlenecks. This information can guide resource allocation and optimization efforts. +//// -[NOTE] -==== -A maximum of 100 log results are available at a time via the API. -To gather more results that that, you must use the log exporter feature -described in this chapter. -==== - -[[use-quay-view-logs]] -== Viewing logs -To view log entries for a repository or namespace from the web UI, do the -following: - -. Select a repository or namespace (organization or user) for which you -have Admin privileges. -. Select the Usage Logs icon from the left column. A Usage Logs screen -appears, like the one shown in the following figure: -+ -image:logs.png[View usage logs] +* *Resource Planning*: Usage logs can provide data on the number of image pulls, pushes, and overall traffic to your registry. -. From the Usage Logs page, you can: - * Set the date range for viewing log entries by adding dates to the From and to boxes. By default, the most recent one week of log entries is displayed. - * Type a string into the Filter Logs box to display log entries that container the given string. - * Toggle the arrow to the left of any log entry to see more or less text associated with that log entry. +* *User Activity*: Logs can help you track user activity, showing which users are accessing and interacting with images in the registry. This can be useful for auditing, understanding user behavior, and managing access controls. -[[use-quay-export-logs]] -== Exporting repository logs -To be able to grab a larger number of log files and save them outside of the {productname} database, you can use the Export Logs feature. 
Here are a few things you should know about using Export Logs: +* *Usage Patterns*: By studying usage patterns, you can gain insights into which images are popular, which versions are frequently used, and which images are rarely accessed. This information can help prioritize image maintenance and cleanup efforts. -* You can choose a range of dates for the logs you want to gather from a repository. +* *Security Auditing*: Usage logs enable you to track who is accessing images and when. This is crucial for security auditing, compliance, and investigating any unauthorized or suspicious activity. -* You can request that the logs be sent to you via an email attachment or directed to a callback URL. +* *Image Lifecycle Management*: Logs can reveal which images are being pulled, pushed, and deleted. This information is essential for managing image lifecycles, including deprecating old images and ensuring that only authorized images are used. -* You need Admin privilege to the repository or namespace to export logs +* *Compliance and Regulatory Requirements*: Many industries have compliance requirements that mandate tracking and auditing of access to sensitive resources. Usage logs can help you demonstrate compliance with such regulations. -* A maximum of 30 days of log data can be exported at a time +* *Identifying Abnormal Behavior*: Unusual or abnormal patterns in usage logs can indicate potential security breaches or malicious activity. Monitoring these logs can help you detect and respond to security incidents more effectively. -* Export Logs only gathers log data that was previously produced. It does not stream logging data. +* *Trend Analysis*: Over time, usage logs can provide trends and insights into how your registry is being used. This can help you make informed decisions about resource allocation, access controls, and image management strategies. 
-* Your {productname} instance must be configured for external storage for this feature (local storage will not work). +There are multiple ways of accessing log files: -* Once the logs are gathered and available, you should immediately copy that data if you want to save it. By default, the data expires in an hour. +* Viewing logs through the web UI. +* Exporting logs so that they can be saved externally. +* Accessing log entries using the API. -To use the Export Logs feature: +To access logs, you must have administrative privileges for the selected repository or namespace. -. Select a repository for which you have Admin privileges. -. Select the Usage Logs icon from the left column. A Usage Logs screen appears. -. Choose the From and to date range of the log entries you want to gather. -. Select the Export Logs button. An Export Usage Logs pop-up appears, as shown -+ -image:export-usage-logs.png[Enter email or callback URL to receive exported logs] - -. Enter the email address or callback URL you want to receive the exported -logs. For the callback URL, you could use a URL to a place such as webhook.site. -. Select Start Logs Export. This causes {productname} to begin gathering the -selected log entries. Depending on the amount of logging data being gathered, -this can take anywhere from one minute to an hour to complete. -. When the log export is completed you will either: - - Receive an email, alerting you to the availability of your requested exported -log entries. - - See a successful status of your log export request from the webhook URL. A -link to the exported data will be available for you to select to download the logs. - -Keep in mind that the URL points to a location in your {productname} external -storage and is set to expire within an hour. So make sure you copy the exported -logs before that expiration time if you intend to keep them. +[NOTE] +==== +A maximum of 100 log results are available at a time via the API. 
+To gather more results than that, you must use the log exporter feature +described in this chapter. +==== \ No newline at end of file diff --git a/modules/proxy-cache-leveraging-storage-quota-limits.adoc b/modules/proxy-cache-leveraging-storage-quota-limits.adoc index c10002471..de0ea9fbd 100644 --- a/modules/proxy-cache-leveraging-storage-quota-limits.adoc +++ b/modules/proxy-cache-leveraging-storage-quota-limits.adoc @@ -42,7 +42,7 @@ $ podman pull quay-server.example.com/proxytest/projectquay/quay:3.6.2 * Click *Tags* in the navigation pane and ensure that `quay:3.7.9` and `quay:3.6.2` are tagged. -. Pull the last image that will result in your repository exceeding the the allotted quota, for example: +. Pull the last image that will result in your repository exceeding the allotted quota, for example: + ---- $ podman pull quay-server.example.com/proxytest/projectquay/quay:3.5.1 diff --git a/modules/proxy-cache-limitations.adoc b/modules/proxy-cache-limitations.adoc index c0379fbb6..1ea52829e 100644 --- a/modules/proxy-cache-limitations.adoc +++ b/modules/proxy-cache-limitations.adoc @@ -6,3 +6,5 @@ Proxy caching with {productname} has the following limitations: * Your proxy cache must have a size limit of greater than, or equal to, the image you want to cache. For example, if your proxy cache organization has a maximum size of 500 MB, and the image a user wants to pull is 700 MB, the image will be cached and will overflow beyond the configured limit. * Cached images must have the same properties that images on a Quay repository must have. + +* Currently, only layers requested by the client are cached. diff --git a/modules/proxy-cache-procedure.adoc b/modules/proxy-cache-procedure.adoc index ae991ddc6..bae33a543 100644 --- a/modules/proxy-cache-procedure.adoc +++ b/modules/proxy-cache-procedure.adoc @@ -12,40 +12,36 @@ The following procedure describes how you can use {productname} to proxy a remot .Procedure -. 
In your Quay organization on the UI, for example, `cache-quayio`, click *Organization Settings* on the left hand pane. +. On the {productname} v2 UI, click the name of an organization, for example, *cache-org*. -. Optional: Click *Add Storage Quota* to configure quota management for your organization. For more information about quota management, see link:https://access.redhat.com//documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[Quota Management]. -+ -[NOTE] -==== -In some cases, pulling images with Podman might return the following error when quota limit is reached during a pull: `unable to pull image: Error parsing image configuration: Error fetching blob: invalid status code from registry 403 (Forbidden)`. Error `403` is inaccurate, and occurs because Podman hides the correct API error: `Quota has been exceeded on namespace`. This known issue will be fixed in a future Podman update. -==== +. In the navigation pane, click *Settings*. -. In *Remote Registry* enter the name of the remote registry to be cached, for example, `quay.io`, and click *Save*. +. In the *Remote Registry* box, enter the name of the remote registry to be cached, for example, `quay.io`, and click *Save*. + [NOTE] ==== By adding a namespace to the *Remote Registry*, for example, `quay.io/`, users in your organization will only be able to proxy from that namespace. ==== -. Optional: Add a *Remote Registry Username* and *Remote Registry Password*. -+ -[NOTE] -==== -If you do not set a *Remote Registry Username* and *Remote Registry Password*, you cannot add one without removing the proxy cache and creating a new registry. -==== +. Optional. In the *Remote Registry username* box, enter the username for authenticating into the remote registry specified in the previous step. For anonymous pulls from the upstream, you can leave this empty. 
If you do not set a username at the time of creation, you cannot add one without removing the proxy cache and creating a new registry. + +. Optional. In the *Remote registry password* box, enter the password for authenticating into the remote registry. For anonymous pulls from the upstream, you can leave this empty. If you do not set a username at the time of creation, you cannot add one without removing the proxy cache and creating a new registry. -. Optional: Set a time in the *Expiration* field. +. Optional. Set a time in the *Expiration* field. + [NOTE] ==== -* The default tag *Expiration* field for cached images in a proxy organization is set to 86400 seconds. In the proxy organization, the tag expiration is refreshed to the value set in the UI's *Expiration* field every time the tag is pulled. This feature is different than Quay's default link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/index#tag-expiration[individual tag expiration] feature. In a proxy organization, it is possible to override the individual tag feature. When this happens, the individual tag's expiration is reset according to the *Expiration* field of the proxy organization. -* Expired images will disappear after the allotted time, but are still stored in Quay. The time in which an image is completely deleted, or collected, depends on the *Time Machine* setting of your organization. The default time for garbage collection is 14 days unless otherwise specified. +* The default tag *Expiration* field for cached images in a proxy organization is set to 86400 seconds. In the proxy organization, the tag expiration is refreshed to the value set in the UI's *Expiration* field every time the tag is pulled. This feature is different than Quay's default link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#tag-expiration[individual tag expiration] feature. 
In a proxy organization, it is possible to override the individual tag feature. When this happens, the individual tag's expiration is reset according to the *Expiration* field of the proxy organization. +* Expired images will disappear after the allotted time, but are still stored in {productname}. The time in which an image is completely deleted, or collected, depends on the *Time Machine* setting of your organization. The default time for garbage collection is 14 days unless otherwise specified. ==== +. Optional. Check the *http* box if you want an insecure protocol used. If not checked, https is used to request the remote registry. + . Click *Save*. -. On the CLI, pull a public image from the registry, for example, quay.io, acting as a proxy cache: +.Verification + +. On the CLI, pull a public image from the remote registry that was specified, for example, `quay.io`, acting as a proxy cache: + ---- $ podman pull /// diff --git a/modules/quay-api-examples.adoc b/modules/quay-api-examples.adoc new file mode 100644 index 000000000..467fdeb4e --- /dev/null +++ b/modules/quay-api-examples.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="quay-api-examples"] += {productname} API examples + +The remainder of this chapter provides {productname} API examples for the features in which they are available. \ No newline at end of file diff --git a/modules/quay-as-cache-proxy.adoc b/modules/quay-as-cache-proxy.adoc index 1f52845dc..1b72dfb5f 100644 --- a/modules/quay-as-cache-proxy.adoc +++ b/modules/quay-as-cache-proxy.adoc @@ -5,17 +5,12 @@ With the growing popularity of container development, customers increasingly rel With this feature, {productname} will act as a proxy cache to circumvent pull-rate limitations from upstream registries. Adding a cache feature also accelerates pull performance, because images are pulled from the cache rather than upstream dependencies.
Cached images are only updated when the upstream image digest differs from the cached image, reducing rate limitations and potential throttling. -With the {productname} cache proxy technology preview, the following features are available: +With {productname} cache proxy, the following features are available: * Specific organizations can be defined as a cache for upstream registries. * Configuration of a Quay organization that acts as a cache for a specific upstream registry. This repository can be defined by using the Quay UI, and offers the following configurations: ** Upstream registry credentials for private repositories or increased rate limiting. ** Expiration timer to avoid surpassing cache organization size. -+ -[NOTE] -==== -Because cache proxy is still marked as `Technology Preview`, there is no storage quota support yet. When this feature goes `General Availability` in a future release of {productname}, the expiration timer will be supplemented by another timer that protects against intermittent upstream registry issues. -==== * Global on/off configurable via the configuration application. * Caching of entire upstream registries or just a single namespace, for example, all of `docker.io` or just `docker.io/library`. * Logging of all cache pulls. diff --git a/modules/quay-bridge-operator-test.adoc b/modules/quay-bridge-operator-test.adoc new file mode 100644 index 000000000..2b3115221 --- /dev/null +++ b/modules/quay-bridge-operator-test.adoc @@ -0,0 +1,190 @@ +:_content-type: CONCEPT +[id="quay-bridge-operator-test"] += Using {qbo} + +Use the following procedure to use the {qbo}. + +.Prerequisites + +* You have installed the {productname} Operator. +* You have logged into {ocp} as a cluster administrator. +* You have logged into your {productname} registry. +* You have installed the {qbo}. +* You have configured the `QuayIntegration` custom resource. + +.Procedure + +. 
Enter the following command to create a new {ocp} project called `e2e-demo`: ++ +[source,terminal] +---- +$ oc new-project e2e-demo +---- + +. After you have created a new project, a new Organization is created in {productname}. Navigate to the {productname} registry and confirm that you have created a new Organization named `openshift_e2e-demo`. ++ +[NOTE] +==== +The `openshift` value of the Organization might differ if the clusterID in your `QuayIntegration` resource used a different value. +==== + +. On the {productname} UI, click the name of the new Organization, for example, *openshift_e2e-demo*. + +. Click *Robot Accounts* in the navigation pane. As part of the new project, the following Robot Accounts should have been created: ++ +* *openshift_e2e-demo+deployer* +* *openshift_e2e-demo+default* +* *openshift_e2e-demo+builder* + +. Enter the following command to confirm three secrets containing Docker configuration associated with the applicable Robot Accounts were created: ++ +[source,terminal] +---- +$ oc get secrets builder-quay-openshift deployer-quay-openshift default-quay-openshift +---- ++ +.Example output ++ +[source,terminal] +---- +stevsmit@stevsmit ocp-quay $ oc get secrets builder-quay-openshift deployer-quay-openshift default-quay-openshift +NAME TYPE DATA AGE +builder-quay-openshift kubernetes.io/dockerconfigjson 1 77m +deployer-quay-openshift kubernetes.io/dockerconfigjson 1 77m +default-quay-openshift kubernetes.io/dockerconfigjson 1 77m +---- + +. Enter the following command to display detailed information about `builder` ServiceAccount (SA), including its secrets, token expiration, and associated roles and role bindings. This ensures that the project is integrated via the {qbo}. ++ +[source,terminal] +---- +$ oc describe sa builder default deployer +---- ++ +.Example output ++ +[source,terminal] +---- +...
+Name: builder +Namespace: e2e-demo +Labels: +Annotations: +Image pull secrets: builder-dockercfg-12345 + builder-quay-openshift +Mountable secrets: builder-dockercfg-12345 + builder-quay-openshift +Tokens: builder-token-12345 +Events: +... +---- + +. Enter the following command to create and deploy a new application called `httpd-template`: ++ +[source,terminal] +---- +$ oc new-app --template=httpd-example +---- ++ +.Example output ++ +[source,terminal] +---- +--> Deploying template "e2e-demo/httpd-example" to project e2e-demo +... +--> Creating resources ... + service "httpd-example" created + route.route.openshift.io "httpd-example" created + imagestream.image.openshift.io "httpd-example" created + buildconfig.build.openshift.io "httpd-example" created + deploymentconfig.apps.openshift.io "httpd-example" created +--> Success + Access your application via route 'httpd-example-e2e-demo.apps.quay-ocp.gcp.quaydev.org' + Build scheduled, use 'oc logs -f buildconfig/httpd-example' to track its progress. + Run 'oc status' to view your app. +---- ++ +After running this command, `BuildConfig`, `ImageStream`, `Service`, `Route`, and `DeploymentConfig` resources are created. When the `ImageStream` resource is created, an associated repository is created in {productname}. For example: ++ +image:e2e-demo-httpd-example.png[Example repository] + +. The `ImageChangeTrigger` for the `BuildConfig` triggers a new Build when the Apache HTTPD image, located in the `openshift` namespace, is resolved. As the new Build is created, the `MutatingWebhookConfiguration` automatically rewrites the output to point at {productname}.
You can confirm that the build is complete by querying the output field of the build by running the following command: ++ +[source,terminal] +---- +$ oc get build httpd-example-1 --template='{{ .spec.output.to.name }}' +---- ++ +.Example output ++ +[source,terminal] +---- +example-registry-quay-quay-enterprise.apps.quay-ocp.gcp.quaydev.org/openshift_e2e-demo/httpd-example:latest +---- + +. On the {productname} UI, navigate to the `openshift_e2e-demo` Organization and select the *httpd-example* repository. + +. Click *Tags* in the navigation pane and confirm that the `latest` tag has been successfully pushed. + +. Enter the following command to ensure that the latest tag has been resolved: ++ +[source,terminal] +---- +$ oc describe is httpd-example +---- ++ +.Example output ++ +[source,terminal] +---- +Name: httpd-example +Namespace: e2e-demo +Created: 55 minutes ago +Labels: app=httpd-example + template=httpd-example +Description: Keeps track of changes in the application image +Annotations: openshift.io/generated-by=OpenShiftNewApp + openshift.io/image.dockerRepositoryCheck=2023-10-02T17:56:45Z +Image Repository: image-registry.openshift-image-registry.svc:5000/e2e-demo/httpd-example +Image Lookup: local=false +Unique Images: 0 +Tags: 1 + +latest + tagged from example-registry-quay-quay-enterprise.apps.quay-ocp.gcp.quaydev.org/openshift_e2e-demo/httpd-example:latest +---- + +. After the `ImageStream` is resolved, a new deployment should have been triggered. Enter the following command to generate a URL output: ++ +[source,terminal] +---- +$ oc get route httpd-example --template='{{ .spec.host }}' +---- ++ +.Example output ++ +[source,terminal] +---- +httpd-example-e2e-demo.apps.quay-ocp.gcp.quaydev.org +---- + +. Navigate to the URL. If a sample webpage appears, the deployment was successful. + +. 
Enter the following command to delete the resources and clean up your {productname} repository: ++ +[source,terminal] +---- +$ oc delete project e2e-demo +---- ++ +[NOTE] +==== +The command waits until the project resources have been removed. This can be bypassed by adding the `--wait=false` flag to the above command. +==== + +. After the command completes, navigate to your {productname} repository and confirm that the `openshift_e2e-demo` Organization is no longer available. + +.Additional resources + +* Best practices dictate that all communication between a client and an image registry be facilitated through secure means. Communication should leverage HTTPS/TLS with a certificate trust between the parties. While {productname} can be configured to serve an insecure configuration, proper certificates should be utilized on the server and configured on the client. Follow the link:https://docs.openshift.com/container-platform/{ocp-y}/security/certificate_types_descriptions/proxy-certificates.html[{ocp} documentation] for adding and managing certificates at the container runtime level. + diff --git a/modules/quay-error-details.adoc b/modules/quay-error-details.adoc new file mode 100644 index 000000000..6760f0980 --- /dev/null +++ b/modules/quay-error-details.adoc @@ -0,0 +1,38 @@ +:_content-type: PROCEDURE +[id="quay-error-details"] += Obtaining {productname} API error details + +{productname} API error details are discoverable by using the API. + +Use the following procedure to discover error details. + +.Prerequisites + +* You have created an OAuth 2 access token. + +.Procedure + +* You can obtain error details of the API by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#geterrordescription[`GET /api/v1/error/{error_type}`] endpoint. 
Note that you must include one of the following error codes: ++ +[options="header", width=100%, cols=".^2a,.^5a"] +|=== +|HTTP Code|Description +|200|Successful invocation +|400|Bad Request +|401|Session required +|403|Unauthorized access +|404|Not found +|=== ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/error/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +curl: (7) Failed to connect to quay-server.example.com port 443 after 0 ms: Couldn't connect to server +---- diff --git a/modules/quay-robot-accounts-intro.adoc b/modules/quay-robot-accounts-intro.adoc index e1fa68c2d..c41690c28 100644 --- a/modules/quay-robot-accounts-intro.adoc +++ b/modules/quay-robot-accounts-intro.adoc @@ -11,4 +11,4 @@ Teams and users can belong to multiple organizations. ==== //should probably be an xref -For more information on robot accounts, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/index#allow-robot-access-user-repo[Allowing robot access to a user repository]. +For more information on robot accounts, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#allow-robot-access-user-repo[Allowing robot access to a user repository]. diff --git a/modules/quayio-main-page.adoc b/modules/quayio-main-page.adoc new file mode 100644 index 000000000..77126ffb7 --- /dev/null +++ b/modules/quayio-main-page.adoc @@ -0,0 +1,230 @@ +:_content-type: CONCEPT +[id="quayio-main-page"] += {quayio} landing page + +The link:quay.io[Quay.io] landing page serves as the central hub for users to access the container registry services offered. This page provides essential information and links to guide users in securely storing, building, and deploying container images effortlessly. + +The landing page of {quayio} includes links to the following resources: + +* link:https://quay.io/search[Explore]. 
On this page, you can search the {quayio} database for various applications and repositories. +* link:https://quay.io/tutorial/[Tutorial]. On this page, you can take a step-by-step walkthrough that shows you how to use {quayio}. +* link:https://quay.io/plans/[Pricing]. On this page, you can learn about the various pricing tiers offered for {quayio}. There are also various FAQs addressed on this page. +* link:https://quay.io/signin/[Sign in]. By clicking this link, you are re-directed to sign into your {quayio} repository. + +image:quayio-header.png[{quayio} header]. + +The landing page also includes information about scheduled maintenance. During scheduled maintenance, {quayio} is operational in read-only mode, and pulls function as normal. Pushes and builds are non-operational during scheduled maintenance. You can subscribe to updates regarding {quayio} maintenance by navigating to link:https://status.quay.io/incidents/kzyx3gh434cr[{quayio} *Status* page] and clicking *Subscribe To Updates*. + +image:scheduled-maintenance-banner.png[Scheduled maintenance banner] + +The landing page also includes links to the following resources: + +* link:https://docs.projectquay.io/welcome.html[Documentation]. This page provides documentation for using {quayio}. +* link:https://cloud.redhat.com/legal/terms[Terms]. This page provides legal information about Red Hat Online Services. +* link:https://www.redhat.com/en/about/privacy-policy[Privacy]. This page provides information about Red Hat's Privacy Statement. +* link:https://quay.io/security/[Security]. This page provides information about {quayio} security, including SSL/TLS, encryption, passwords, access controls, firewalls, and data resilience. +* link:https://quay.io/about/[About]. This page includes information about packages and projects used and a brief history of the product. +* link:https://access.redhat.com/articles/quayio-help[Contact]. 
This page includes information about support and contacting the Red Hat Support Team. +* link:https://status.quay.io/[All Systems Operational]. This page includes information about the status of {quayio} and a brief history of maintenance. +* Cookies. By clicking this link, a popup box appears that allows you to set your cookie preferences. + +image:quayio-footer.png[{quayio} footer]. + +You can also find information about link:https://www.redhat.com/en/technologies/cloud-computing/quay[Trying {productname} on premise] or link:https://quay.io/plans/[Trying {productname} on the cloud], which redirects you to the *Pricing* page. Each option offers a free trial. + +[id="pricsignining-page-quayio"] +== Creating a {quayio} account + +New users of {quayio} are required to both link:https://sso.redhat.com/auth/realms/redhat-external/login-actions/registration?client_id=quay.io&tab_id=6C6R-5nPDLo[Register for a Red Hat account] and create a {quayio} username. These accounts are correlated, with two distinct differences: + +* The {quayio} account can be used to push and pull container images or Open Container Initiative images to {quayio} to store images. +* The Red Hat account provides users access to the {quayio} user interface. For paying customers, this account can also be used to access images from link:registry.redhat.io[the Red Hat Ecosystem Catalog], which can be pushed to their {quayio} repository. + +Users must first register for a Red Hat account, and then create a {quayio} account. Users need both accounts to properly use all features of {quayio}. + +[id="registering-red-hat-account"] +=== Registering for a Red Hat Account + +Use the following procedure to register for a Red Hat account for {quayio}. + +.Procedure + +. Navigate to the link:https://access.redhat.com/[Red Hat Customer Portal]. + +. In the navigation pane, click *Log In*. + +. When navigated to the log in page, click *Register for a Red Hat Account*. + +. Enter a Red Hat login ID. + +. Enter a password. + +. 
Enter the following personal information: ++ +* *First name* +* *Last name* +* *Email address* +* *Phone number* + +. Enter the following contact information that is relative to your country or region. For example: ++ +* *Country/region* +* *Address* +* *Postal code* +* *City* +* *County* + +. Select and agree to Red Hat's terms and conditions. + +. Click *Create my account*. + +. Navigate to {quayio} and log in. + +[id="creating-first-quayio-user-account"] +=== Creating a {quayio} user account + +Use the following procedure to create a {quayio} user account. + +.Prerequisites + +* You have created a Red Hat account. + +.Procedure + +. If required, resolve the captcha by clicking *I am not a robot* and confirming. You are redirected to a *Confirm Username* page. + +. On the *Confirm Username* page, enter a username. By default, a username is generated. If the same username already exists, a number is added at the end to make it unique. This username is used as a namespace in the Quay Container Registry. + +. After deciding on a username, click *Confirm Username*. You are redirected to the {quayio} *Repositories* page, which serves as a dedicated hub where users can access and manage their repositories with ease. From this page, users can efficiently organize, navigate, and interact with their container images and related resources. + +[id="quayio-rh-sso-support"] +=== {quayio} Single Sign On support + +Red Hat Single Sign On (SSO) can be used with {quayio}. Use the following procedure to set up Red Hat SSO with {quayio}. For most users, these accounts are already linked. However, for some legacy {quayio} users, this procedure might be required. + +.Prerequisites + +* You have created a {quayio} account. + +.Procedure + +. Navigate to the link:recovery.quay.io[{quayio} *Recovery* page]. + +. Enter your username and password, then click *Sign in to Quay Container Registry*. + +. In the navigation pane, click your username -> *Account Settings*. + +. 
In the navigation pane, click *External Logins and Applications*. + +. Click *Attach to Red Hat*. + +. If you are already signed into Red Hat SSO, your account is automatically linked. Otherwise, you are prompted to sign into Red Hat SSO by entering your Red Hat login or email, and the password. Alternatively, you might need to create a new account first. ++ +After signing into Red Hat SSO, you can choose to authenticate against {quayio} using your Red Hat account from the login page. + +[discrete] +=== Additional resources +* For more information, see +link:https://access.redhat.com/articles/5363231[Quay.io Now Supports Red Hat Single Sign On]. + + +[id="explore-page-quayio"] +== Exploring {quayio} + +The {quayio} link:https://quay.io/search[*Explore*] page is a valuable hub that allows users to delve into a vast collection of container images, applications, and repositories shared by the {quayio} community. With its intuitive and user-friendly design, the *Explore* page offers a powerful search function, enabling users to effortlessly discover containerized applications and resources. + +[id="tutorial-page-quayio"] +== Trying {quayio} (deprecated) + +[NOTE] +==== +The {productname} tutorial is currently deprecated and will be removed when the v2 UI goes generally available (GA). +==== + +The {quayio} link:https://quay.io/tutorial[*Tutorial*] page offers users an introduction to the {quayio} container registry service. By clicking *Continue Tutorial* users learn how to perform the following features on {quayio}: + +* Logging into Quay Container Registry from the Docker CLI +* Starting a container +* Creating images from a container +* Pushing a repository to Quay Container Registry +* Viewing a repository +* Setting up build triggers +* Changing a repository's permissions + +[id="pricing-page-quayio"] +== Information about {quayio} pricing + +In addition to a free tier, {quayio} also offers several paid plans that have enhanced benefits. 
+ +The {quayio} *Pricing* page offers information about {quayio} plans and the associated prices of each plan. The cost of each tier can be found on the link:https://quay.io/plans/[*Pricing*] page. All {quayio} plans include the following benefits: + +* Continuous integration +* Public repositories +* Robot accounts +* Teams +* SSL/TLS encryption +* Logging and auditing +* Invoice history + +{quayio} subscriptions are handled by the link:https://stripe.com[Stripe] payment processing platform. A valid credit card is required to sign up for {quayio}. + +To sign up for {quayio}, use the following procedure. + +.Procedure + +. Navigate to the link:https://quay.io/plans/[{quayio} *Pricing* page]. + +. Decide on a plan, for example, *Small*, and click *Buy Now*. You are redirected to the *Create New Organization* page. Enter the following information: ++ +* *Organization Name* +* *Organization Email* +* Optional. You can select a different plan if you want a larger plan than, for example, *Small*. + +. Resolve the captcha, and select *Create Organization*. + +. You are redirected to Stripe. Enter the following information: ++ +* *Card information*, including *MM/YY* and the *CVC* +* *Name on card* +* *Country or region* +* *ZIP* (if applicable) +* Check the box if you want your information to be saved. +* *Phone Number* + +. Click *Subscribe* after all boxes have been filled. + +//// +[id="pricing-page-faq"] +=== *Pricing* FAQ + +The following questions are commonly asked in regards to a {quayio} subscription. + +* *How do I use Quay with my servers and code?* ++ +Using Quay with your infrastructure is separated into two main actions: building containers and distributing them to your servers. ++ +You can configure Quay to automatically build containers of your code on each commit. Integrations with GitHub, Bitbucket, GitLab and self-hosted Git repositories are supported. Each built container is stored on Quay and is available to be pulled down onto your servers. 
++ +To distribute your private containers onto your servers, Docker or rkt must be configured with the correct credentials. Quay has sophisticated access controls — organizations, teams, robot accounts, and more — to give you full control over which servers can pull down your containers. An API can be used to automate the creation and management of these credentials. + +* *How is Quay optimized for a team environment?* ++ +Quay's permission model is designed for teams. Each new user can be assigned to one or more teams, with specific permissions. Robot accounts, used for automated deployments, can be managed per team as well. This system allows for each development team to manage their own credentials. ++ +Full logging and auditing is integrated into every part of the application and API. Quay helps you dig into every action for more details. +Additional FAQs + +* *Can I change my plan?* ++ +Yes, you can change your plan at any time and your account will be pro-rated for the difference. For large organizations, Red Hat Quay offers unlimited users and repos. +Do you offer special plans for business or academic institutions? ++ +Please contact us at our support email address to discuss the details of your organization and intended usage. + +* *Can I use Quay for free?* ++ +Yes! We offer unlimited storage and serving of public repositories. We strongly believe in the open source community and will do what we can to help! +What types of payment do you accept? ++ +Quay uses Stripe as our payment processor, so we can accept any of the payment options they offer, which are currently: Visa, MasterCard, American Express, JCB, Discover and Diners Club. 
+//// \ No newline at end of file diff --git a/modules/quayio-overview.adoc b/modules/quayio-overview.adoc new file mode 100644 index 000000000..a6db3819f --- /dev/null +++ b/modules/quayio-overview.adoc @@ -0,0 +1,15 @@ +:_content-type: CONCEPT +[id="quayio-overview"] += {quayio} overview + +{quayio} is a registry for storing and building container images, but can also be used to distribute both container images and other artifacts. It offers both free and paid tiers to cater to various user needs, and is primarily hosted in the United States (`us-east-1` region of Amazon Web Services) with CDN edge servers scattered throughout the world. + +{quayio} is flexible, easy to use, and allows users to upload and manage container images. Developers can create private repositories, ensuring sensitive or proprietary code remains secure within their organization. Additionally, users can set up access controls and manage team collaboration, enabling seamless sharing of container images among designated team members. + +{quayio} addresses container security concerns through its integrated image scanner, link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/index[Clair]. The service automatically scans container images for known vulnerabilities and security issues, providing developers with valuable insights into potential risks and suggesting remediation steps. + +{quayio} excels in automation and supports integration with popular Continuous Integration/Continuous Deployment (CI/CD) tools and platforms, enabling seamless automation of the container build and deployment processes. As a result, developers can streamline their workflows, significantly reducing manual intervention and improving overall development efficiency. + +{quayio} caters to the needs of both large and small-scale deployments. 
The platform can handle significant container image traffic and offers efficient replication and distribution mechanisms to deliver container images to various geographical locations. + +With {quayio}, developers can discover a collection of pre-built, public container images shared by other users, making it easier to find useful tools, applications, and services for their projects. \ No newline at end of file diff --git a/modules/quayio-support.adoc b/modules/quayio-support.adoc new file mode 100644 index 000000000..b60fd5a76 --- /dev/null +++ b/modules/quayio-support.adoc @@ -0,0 +1,18 @@ +:_content-type: CONCEPT +[id="quayio-support"] += {quayio} support + +Technical support is a crucial aspect of the {quayio} container registry service, providing assistance not only in managing container images but also ensuring the functionality and availability of the hosted platform. + +To help users with functionality-related issues, Red Hat offers {quayio} customers access to several resources. The link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] contains valuable content to maximize the potential of Red Hat's products and technologies. Users can find articles, product documentation, and videos that outline best practices for installing, configuring, and utilizing Red Hat products. It also serves as a hub for solutions to known issues, providing concise root cause descriptions and remedial steps. + +Additionally, {quayio} customers can count on the technical support team to address questions, troubleshoot problems, and provide solutions for an optimized experience with the platform. Whether it involves understanding specific features, customizing configurations, or resolving container image build issues, the support team is dedicated to guiding users through each step with clarity and expertise. 
+ +For incidents related to service disruptions or performance issues not listed on the link:https://status.quay.io/[Quay.io status page], which includes availability and functionality concerns, paying customers can raise a technical support ticket using the link:http://access.redhat.com[Red Hat Customer Portal]. A service incident is defined as an unplanned interruption of service or reduction in service quality, affecting multiple users of the platform. + +With this comprehensive technical support system in place, {quayio} ensures that users can confidently manage their container images, optimize their platform experience, and overcome any challenges that might arise. + +[role="_additional-resources"] +.Additional resources + +Current {productname} and {quayio} users can find more information about troubleshooting and support in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/troubleshooting_red_hat_quay[{productname} Troubleshooting guide]. diff --git a/modules/quayio-ui-overview.adoc b/modules/quayio-ui-overview.adoc new file mode 100644 index 000000000..500db76a6 --- /dev/null +++ b/modules/quayio-ui-overview.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[id="quayio-ui-overview"] += {quayio} user interface overview + +The user interface (UI) of {quayio} is a fundamental component that serves as the user's gateway to managing and interacting with container images within the platform's ecosystem. {quayio}'s UI is designed to provide an intuitive and user-friendly interface, making it easy for users of all skill levels to navigate and harness {quayio}'s features and functionalities. + +This documentation section aims to introduce users to the key elements and functionalities of {quayio}'s UI. It will cover essential aspects such as the UI's layout, navigation, and key features, providing a solid foundation for users to explore and make the most of {quayio}'s container registry service. 
+ +Throughout this documentation, step-by-step instructions, visual aids, and practical examples are provided on the following topics: + +* Exploring applications and repositories +* Using the {quayio} tutorial +* Pricing and {quayio} plans +* Signing in and using {quayio} features + +Collectively, this document ensures that users can quickly grasp the UI's nuances and successfully navigate their containerization journey with {quayio}. \ No newline at end of file diff --git a/modules/quota-establishment-api.adoc b/modules/quota-establishment-api.adoc index 5b835148f..b7024c203 100644 --- a/modules/quota-establishment-api.adoc +++ b/modules/quota-establishment-api.adoc @@ -1,79 +1,62 @@ -[[quota-establishment-api]] -= Establishing quota with the {productname} API +[id="quota-establishment-api"] += Establishing quota for an organization with the {productname} API -When an organization is first created, it does not have a quota applied. Use the */api/v1/organization/{organization}/quota* endpoint: +When an organization is first created, it does not have an established quota. You can use the API to check, create, change, or delete quota limitations for an organization. -.Sample command -[source,terminal] ----- -$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq ----- +.Prerequisites -.Sample output -[source,terminal] ----- -[] ----- +* You have generated an OAuth access token. -== Setting the quota +.Procedure -To set a quota for an organization, POST data to the */api/v1/organization/{orgname}/quota* endpoint: -.Sample command +. 
To set a quota for an organization, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquota[`POST /api/v1/organization/{orgname}/quota`] endpoint: ++ [source,terminal] ---- -$ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"limit_bytes": 10485760}' https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/organization/testorg/quota | jq +$ curl -X POST "https:///api/v1/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240, + "limits": "10 Gi" + }' ---- - -.Sample output ++ +.Example output [source,terminal] ---- "Created" ---- -== Viewing the quota - -To see the applied quota, `GET` data from the */api/v1/organization/{orgname}/quota* endpoint: - -.Sample command +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquota[`GET /api/v1/organization/{orgname}/quota`] command to see if your organization already has an established quota: ++ [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https:///api/v1/organization//quota | jq ---- - -.Sample output -[source,json] ++ +.Example output +[source,terminal] ---- -[ - { - "id": 1, - "limit_bytes": 10485760, - "default_config": false, - "limits": [], - "default_config_exists": false - } -] +[{"id": 1, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false}] ---- -== Modifying the quota - -To change the existing quota, in this instance from 10 MB to 100 MB, PUT data to the 
*/api/v1/organization/{orgname}/quota/{quota_id}* endpoint: - -.Sample command +. You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquota[`PUT /api/v1/organization/{orgname}/quota/{quota_id}`] command to modify the existing quota limitation. For example: ++ [source,terminal] ---- -$ curl -k -X PUT -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"limit_bytes": 104857600}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1 | jq +$ curl -X PUT "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' ---- - -.Sample output ++ +.Example output [source,json] ---- -{ - "id": 1, - "limit_bytes": 104857600, - "default_config": false, - "limits": [], - "default_config_exists": false -} +{"id": 1, "limit_bytes": 21474836480, "limit": "20.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} ---- == Pushing images @@ -102,7 +85,7 @@ To view the storage consumed, `GET` data from the */api/v1/repository* endpoint: .Sample command [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true"a=true' | jq +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true' | jq ---- .Sample output @@ -148,7 +131,7 @@ $ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs .Sample command [source,terminal] ---- -$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 
'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true"a=true' +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true' ---- + .Sample output diff --git a/modules/quota-establishment-ui.adoc b/modules/quota-establishment-ui.adoc index cd47e2dad..122b1ef4a 100644 --- a/modules/quota-establishment-ui.adoc +++ b/modules/quota-establishment-ui.adoc @@ -1,4 +1,5 @@ -[[quota-establishment-ui]] +:_content-type: CONCEPT +[id="quota-establishment-ui"] = Establishing quota in {productname} UI The following procedure describes how you can report storage consumption and establish storage quota limits. @@ -36,15 +37,24 @@ image:quota-org-init-consumed.png[Initial consumed quota] + image:quota-su-increase-100MB.png[Increase quota] -. Push a sample image to the organization from the command line: +. Pull a sample image by entering the following command: + -.Sample commands [source,terminal] ---- $ podman pull ubuntu:18.04 +---- +. Tag the sample image by entering the following command: ++ +[source,terminal] +---- $ podman tag docker.io/library/ubuntu:18.04 example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 +---- +. Push the sample image to the organization by entering the following command: ++ +[source,terminal] +---- $ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 ---- @@ -57,15 +67,24 @@ image:quota-su-consumed-first.png[Total Quota Consumed for first image] .Total Quota Consumed for first image image:quota-org-consumed-first.png[Total Quota Consumed for first image] -. Pull, tag, and push a second image, for example, `nginx`: +. 
Pull a second sample image by entering the following command: + -.Sample commands [source,terminal] ---- $ podman pull nginx +---- +. Tag the second image by entering the following command: ++ +[source,terminal] +---- $ podman tag docker.io/library/nginx example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx +---- +. Push the second image to the organization by entering the following command: ++ +[source,terminal] +---- $ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx ---- @@ -90,7 +109,7 @@ image:quota-org-quota-policy.png[Quota policy in organization settings] . Push an image where the reject limit is exceeded: + -Because the reject limit (80%) has been set to below the current repository size (~83%), the next push is rejected automatically. +Because the reject limit (80%) has been set to below the current repository size (~83%), the next pushed image is rejected automatically. + .Sample image push [source,terminal] diff --git a/modules/quota-limit-api.adoc b/modules/quota-limit-api.adoc new file mode 100644 index 000000000..e2275e75f --- /dev/null +++ b/modules/quota-limit-api.adoc @@ -0,0 +1,90 @@ +[id="quota-limit-management-api"] += Setting quota limits for an organization with the {productname} API + +You can set specific quota limits for an organization so that, when exceeded, a warning is returned, or the pushed image is denied altogether. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquotalimit[`POST /api/v1/organization/{orgname}/quota/{quota_id}/limit`] command to create a quota policy that rejects images if they exceeded the allotted quota. 
For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 21474836480, + "type": "Reject", <1> + "threshold_percent": 90 <2> + }' +---- +<1> One of `Reject` or `Warning`. +<2> Quota threshold, in percent of quota. ++ +.Example output ++ +[source,terminal] +---- +"Created" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquotalimit[`GET /api/v1/organization/{orgname}/quota/{quota_id}/limit`] to obtain the ID of the quota limit. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 2, "type": "Reject", "limit_percent": 90}] +---- + +//// +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserquotalimit[`GET /api/v1/user/quota/{quota_id}/limit/{limit_id}`] endpoint to return information about the quota limit. Note that this requires the limit ID. For example: ++ +[source,terminal] +---- + +---- ++ +.Example output ++ +[source,terminal] +---- + +---- +//// + +. Update the policy with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquotalimit[`PUT /api/v1/organization/{orgname}/quota/{quota_id}/limit/{limit_id}`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "type": "", + "threshold_percent": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 3, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [{"id": 2, "type": "Warning", "limit_percent": 80}], "default_config_exists": false} +---- + +. You can delete the quota limit with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationquotalimit[`DELETE /api/v1/organization/{orgname}/quota/{quota_id}/limit/{limit_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/quota-limit-user-api.adoc b/modules/quota-limit-user-api.adoc new file mode 100644 index 000000000..53c182513 --- /dev/null +++ b/modules/quota-limit-user-api.adoc @@ -0,0 +1,67 @@ +[id="quota-limit-user-api"] += Obtaining quota limits for the user with the {productname} API + +You can specify quota and limitations for users so that, when exceeded, a warning is returned, or the pushed image is denied altogether. Quota limits for users must be set on the {productname} UI. The following APIs can be used to view the quota limits for the user that is logged in. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserquota[`GET /api/v1/user/quota`] command to return information about the quota limitations: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 4, "limit_bytes": 2199023255552, "limit": "2.0 TiB", "default_config": false, "limits": [], "default_config_exists": false}] +---- + +. After you have received the quota ID, you can pass it in with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserquota[`GET /api/v1/user/quota/{quota_id}`] endpoint to return information about the limitation: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 4, "limit_bytes": 2199023255552, "limit": "2.0 TiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. The limitations can be viewed by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserquotalimit[`GET /api/v1/user/quota/{quota_id}/limit`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 3, "type": "Reject", "limit_percent": 100}] +---- + +. 
Additional information about the entire policy can be returned using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserquotalimit[`GET /api/v1/user/quota/{quota_id}/limit/{limit_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit/{limit_id}" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 4, "limit_bytes": 2199023255552, "limit": "2.0 TiB", "default_config": false, "limits": [{"id": 3, "type": "Reject", "limit_percent": 100}], "default_config_exists": false} + +---- \ No newline at end of file diff --git a/modules/quota-management-and-enforcement.adoc b/modules/quota-management-and-enforcement.adoc index e054391ba..30af5a653 100644 --- a/modules/quota-management-and-enforcement.adoc +++ b/modules/quota-management-and-enforcement.adoc @@ -1,10 +1,11 @@ -[[red-hat-quay-quota-management-and-enforcement]] -= {productname} quota management and enforcement +:_content-type: CONCEPT +[id="red-hat-quay-quota-management-and-enforcement"] += {productname} quota management and enforcement overview -With {productname} 3.7, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. On-premise Quay users are now equipped with the following capabilities to manage the capacity limits of their environment: +With {productname}, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. On-premise {productname} users are now equipped with the following capabilities to manage the capacity limits of their environment: * **Quota reporting:** With this feature, a superuser can track the storage consumption of all their organizations. Additionally, users can track the storage consumption of their assigned organization. 
* **Quota management:** With this feature, a superuser can define soft and hard checks for {productname} users. Soft checks tell users if the storage consumption of an organization reaches their configured threshold. Hard checks prevent users from pushing to the registry when storage consumption reaches the configured limit. -Together, these features allow service owners of a Quay registry to define service level agreements and support a healthy resource budget. +Together, these features allow service owners of a {productname} registry to define service level agreements and support a healthy resource budget. diff --git a/modules/quota-management-api.adoc b/modules/quota-management-api.adoc new file mode 100644 index 000000000..00178ec14 --- /dev/null +++ b/modules/quota-management-api.adoc @@ -0,0 +1,6 @@ +[id="quota-management-api"] += Establishing quota with the {productname} API + +You can establish quota for an organization or users, and tailor quota policies to suit the needs of your registry. + +The following sections show you how to establish quota for an organization, a user, and then how to modify those settings. \ No newline at end of file diff --git a/modules/quota-management-arch.adoc b/modules/quota-management-arch.adoc index 65da1ec04..42a360d7e 100644 --- a/modules/quota-management-arch.adoc +++ b/modules/quota-management-arch.adoc @@ -1,28 +1,32 @@ -[[quota-management-arch]] +:_content-type: CONCEPT +[id="quota-management-arch"] = Quota management architecture -//// -image:quota-management.png[Quota management architecture] +With the quota management feature enabled, individual blob sizes are summed at the repository and namespace level. For example, if two tags in the same repository reference the same blob, the size of that blob is only counted once towards the repository total. Additionally, manifest list totals are counted toward the repository total. 
-The preceding image shows the expected design flow and architecture of the Quota management feature. +[IMPORTANT] +==== +Because manifest list totals are counted toward the repository total, the total quota consumed when upgrading from a previous version of {productname} might be reportedly differently in {productname} 3.9. In some cases, the new total might go over a repository's previously-set limit. {productname} administrators might have to adjust the allotted quota of a repository to account for these changes. +==== + +The quota management feature works by calculating the size of existing repositories and namespace with a backfill worker, and then adding or subtracting from the total for every image that is pushed or garbage collected afterwords. Additionally, the subtraction from the total happens when the manifest is garbage collected. [NOTE] ==== -With {productname} 3.7, boxes outlined in black show the current flow, and boxes outlined in green show what need to be implemented to support this feature. +Because subtraction occurs from the total when the manifest is garbage collected, there is a delay in the size calculation until it is able to be garbage collected. For more information about garbage collection, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red_hat_quay_garbage_collection[{productname} garbage collection]. ==== -//// -The `RepositorySize` database table holds the storage consumption, in bytes, of a {productname} repository within an organization. The sum of all repository sizes for an organization defines the current storage size of a {productname} organization. When an image push is initialized, the user's organization storage is validated to check if it is beyond the configured quota limits. 
If an image push exceeds defined quota limitations, a soft or hard check occurs: +The following database tables hold the quota repository size, quota namespace size, and quota registry size, in bytes, of a {productname} repository within an organization: + +* `QuotaRepositorySize` +* `QuotaNameSpaceSize` +* `QuotaRegistrySize` + +The organization size is calculated by the backfill worker to ensure that it is not duplicated. When an image push is initialized, the user's organization storage is validated to check if it is beyond the configured quota limits. If an image push exceeds defined quota limitations, a soft or hard check occurs: * For a soft check, users are notified. * For a hard check, the push is stopped. If storage consumption is within configured quota limits, the push is allowed to proceed. -Image manifest deletion follows a similar flow, whereby the links between associated image tags and the manifest are deleted. Additionally, after the image manifest is deleted, the repository size is recalculated and updated in the `RepositorySize` table. - -//// -The image below represents the design flow and architecture when deleting an image manifest: - -image:manifest-deletion-architecture.png[Manifest deletion architecture] -//// \ No newline at end of file +Image manifest deletion follows a similar flow, whereby the links between associated image tags and the manifest are deleted. Additionally, after the image manifest is deleted, the repository size is recalculated and updated in the `QuotaRepositorySize`, `QuotaNameSpaceSize`, and `QuotaRegistrySize` tables. 
\ No newline at end of file diff --git a/modules/quota-management-limitations.adoc b/modules/quota-management-limitations.adoc index 00c25002d..88e9b0adf 100644 --- a/modules/quota-management-limitations.adoc +++ b/modules/quota-management-limitations.adoc @@ -1,4 +1,5 @@ -[[quota-management-limitations]] +:_content-type: REFERENCE +[id="quota-management-limitations"] = Quota management limitations Quota management helps organizations to maintain resource consumption. One limitation of quota management is that calculating resource consumption on push results in the calculation becoming part of the push's critical path. Without this, usage data might drift. diff --git a/modules/quota-management-permanent-delete-39.adoc b/modules/quota-management-permanent-delete-39.adoc new file mode 100644 index 000000000..cfe70bbe4 --- /dev/null +++ b/modules/quota-management-permanent-delete-39.adoc @@ -0,0 +1,53 @@ +:_content-type: PROCEDURE +[id="quota-management-permanent-delete-39"] += Permanently deleting an image tag in {productname} 3.9 + +In some cases, users might want to delete an image tag outside of the time machine window. Use the following procedure to manually delete an image tag permanently. + +[IMPORTANT] +==== +The results of the following procedure cannot be undone. Use with caution. +==== + +.Procedure + +. Ensure that the `PERMANENTLY_DELETE_TAGS` and `RESET_CHILD_MANIFEST_EXPIRATION` parameters are set to `true` in your `config.yaml` file. For example: ++ +[source,yaml] +---- +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. Restart your {productname} deployment. + +. If you are using the legacy {productname} UI: + +.. On the {productname} UI, click the name of your organization and then the name of your repository. + +.. Click *Tags*. + +.. Select the checkbox of the name of the tag that you want to delete, for example, `tag1`. + +.. Click *Actions* -> *Delete Tags*. When the popup box appears, click *Delete Tag*. + +.. 
Click *Tag History* in the navigation pane. + +.. In the *Permanently Delete* category, click *Delete tag1*. When the popup box appears, click *Permanently Delete Tag*. ++ +Now, the image tag is no long calculated toward the total. + +. If you are using the {productname} v2 UI: + +.. In the navigation pane, click *Organizations* -> and the name of your organization, for example, *quota-test*. + +.. Click the name of your repository, for example, *ubuntu*. + +.. Check the box of the tag that you want to delete. + +.. Click *Actions* -> *Permanently Delete*. When the popup box appears, click *Delete*. ++ +[NOTE] +==== +Currently, the {productname} v2 UI does not show the Total Quota Consumed. To see Total Quota Consumed, you must switch back to the legacy UI. +==== \ No newline at end of file diff --git a/modules/quota-management-query-39.adoc b/modules/quota-management-query-39.adoc new file mode 100644 index 000000000..9960f47fb --- /dev/null +++ b/modules/quota-management-query-39.adoc @@ -0,0 +1,27 @@ +:_content-type: PROCEDURE +[id="quota-management-query-39"] += Calculating the total registry size in {productname} 3.9 + +Use the following procedure to queue a registry total calculation. + +[NOTE] +==== +This feature is done on-demand, and calculating a registry total is database intensive. Use with caution. +==== + +.Prerequisites + +* You have upgraded to {productname} 3.9. +* You are logged in as a {productname} superuser. + +.Procedure + +. On the {productname} UI, click your username -> *Super User Admin Panel*. + +. In the navigation pane, click *Manage Organizations*. + +. Click *Calculate*, next to *Total Registry Size: 0.00 KB, Updated: Never , Calculation required*. Then, click *Ok*. + +. After a few minutes, depending on the size of your registry, refresh the page. Now, the Total Registry Size should be calculated. 
For example: ++ +image:total-registry-size.png[Total registry size] \ No newline at end of file diff --git a/modules/quota-management-testing-39.adoc b/modules/quota-management-testing-39.adoc new file mode 100644 index 000000000..7bad032cb --- /dev/null +++ b/modules/quota-management-testing-39.adoc @@ -0,0 +1,59 @@ +:_content-type: PROCEDURE +[id="quota-management-testing-39"] += Testing quota management for {productname} 3.9 + +With quota management configured for {productname} 3.9, duplicative images are now only counted once towards the repository total. + +Use the following procedure to test that a duplicative image is only counted once toward the repository total. + +.Prerequisites + +* You have configured quota management for {productname} 3.9. + +.Procedure + +. Pull a sample image, for example, `ubuntu:18.04`, by entering the following command: ++ +[source,terminal] +---- +$ podman pull ubuntu:18.04 +---- + +. Tag the same image twice by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/ubuntu:18.04 quay-server.example.com/quota-test/ubuntu:tag1 +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/ubuntu:18.04 quay-server.example.com/quota-test/ubuntu:tag2 +---- + +. Push the sample image to your organization by entering the following commands: ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quota-test/ubuntu:tag1 +---- ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quota-test/ubuntu:tag2 +---- + +. On the {productname} UI, navigate to *Organization* and click the *Repository Name*, for example, *quota-test/ubuntu*. Then, click *Tags*. There should be two repository tags, `tag1` and `tag2`, each with the same manifest. 
For example: ++ +image:manifest-example.png[Manifest example] ++ +However, by clicking on the *Organization* link, we can see that the *Total Quota Consumed* is *27.94 MB*, meaning that the Ubuntu image has only been accounted for once: ++ +image:total-quota-consumed.png[Total quota consumed] ++ +If you delete one of the Ubuntu tags, the *Total Quota Consumed* remains the same. ++ +[NOTE] +==== +If you have configured the {productname} time machine to be longer than `0` seconds, subtraction will not happen until those tags pass the time machine window. If you want to expedite permanent deletion, see Permanently deleting an image tag in {productname} 3.9. +==== \ No newline at end of file diff --git a/modules/quota-organization-management-api.adoc b/modules/quota-organization-management-api.adoc new file mode 100644 index 000000000..4fa96a0fb --- /dev/null +++ b/modules/quota-organization-management-api.adoc @@ -0,0 +1,85 @@ +[id="quota-organization-management-api"] += Managing organization quota with the {productname} API + +When an organization is first created, it does not have an established quota. You can use the API to check, create, change, or delete quota limitations for an organization. + +.Prerequisites + +* You have generated an OAuth access token. + +.Procedure + +. To set a quota for an organization, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquota[`POST /api/v1/organization/{orgname}/quota`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240, + "limits": "10 Gi" + }' +---- ++ +.Example output +[source,terminal] +---- +"Created" +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquota[`GET /api/v1/organization/{orgname}/quota`] command to return information about the policy, including the ID number, which is required for other organization quota endpoints. For example: ++ +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq +---- ++ +.Example output +[source,terminal] +---- +[{"id": 1, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false}] +---- ++ +After you obtain the ID number, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationquota[`GET /api/v1/organization/{orgname}/quota/{quota_id}`] command to list the quota policy. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- ++ +.Example output +[source,terminal] +---- +{"id": 1, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquota[`PUT /api/v1/organization/{orgname}/quota/{quota_id}`] command to modify the existing quota limitation. Note that this requires the policy ID. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output +[source,json] +---- +{"id": 1, "limit_bytes": 21474836480, "limit": "20.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. An organization's quota can be deleted with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationquota[`DELETE /api/v1/organization/{orgname}/quota/{quota_id}`] command. For example: ++ +[source,terminal] ++ +---- +$ curl -X DELETE "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/reassigning-oauth-access-token.adoc b/modules/reassigning-oauth-access-token.adoc new file mode 100644 index 000000000..c00bf5a5b --- /dev/null +++ b/modules/reassigning-oauth-access-token.adoc @@ -0,0 +1,80 @@ +:_content-type: PROCEDURE +[id="reassigning-oauth-access-token"] += Reassigning an OAuth access token + +Organization administrators can assign OAuth API tokens to be created by other user's with specific permissions. This allows the audit logs to be reflected accurately when the token is used by a user that has no organization administrative permissions to create an OAuth API token. + +[NOTE] +==== +The following procedure only works on the current {productname} UI. It is not currently implemented in the {productname} v2 UI. +==== + +.Prerequisites + +* You are logged in as a user with organization administrative privileges, which allows you to assign an OAuth API token. ++ +[NOTE] +==== +OAuth API tokens are used for authentication and not authorization. For example, the user that you are assigning the OAuth token to must have the `Admin` team role to use administrative API endpoints. 
For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#creating-an-image-repository-via-docker[Managing access to repositories]. +==== + +.Procedure + +. Optional. If not already, update your {productname} `config.yaml` file to include the `FEATURE_ASSIGN_OAUTH_TOKEN: true` field: ++ +[source,yaml] +---- +# ... +FEATURE_ASSIGN_OAUTH_TOKEN: true +# ... +---- + +. Optional. Restart your {productname} registry. + +. Log in to your {productname} registry as an organization administrator. + +. Click the name of the organization in which you created the OAuth token for. + +. In the navigation pane, click *Applications*. + +. Click the proper application name. + +. In the navigation pane, click *Generate Token*. + +. Click *Assign another user* and enter the name of the user that will take over the OAuth token. + +. Check the boxes for the desired permissions that you want the new user to have. For example, if you only want the new user to be able to create repositories, click *Create Repositories*. ++ +[IMPORTANT] +==== +Permission control is defined by the team role within an organization and must be configured regardless of the options selected here. For example, the user that you are assigning the OAuth token to must have the `Admin` team role to use administrative API endpoints. + +Solely checking the *Super User Access* box does not actually grant the user this permission. Superusers must be configured via the `config.yaml` file _and_ the box must be checked here. +==== + +. Click *Assign token*. A popup box appears that confirms authorization with the following message and shows you the approved permissions: ++ +[source,text] +---- +This will prompt user to generate a token with the following permissions: +repo:create +---- + +. Click *Assign token* in the popup box. 
You are redirected to a new page that displays the following message: ++ +[source,text] +---- +Token assigned successfully +---- + +.Verification + +. After reassigning an OAuth token, the assigned user must accept the token to receive the bearer token, which is required to use API endpoints. Request that the assigned user logs into the {productname} registry. + +. After they have logged in, they must click their username under *Users and Organizations*. + +. In the navigation pane, they must click *External Logins And Applications*. + +. Under *Authorized Applications*, they must confirm the application by clicking *Authorize Application*. They are directed to a new page where they must reconfirm by clicking *Authorize Application*. + +. They are redirected to a new page that reveals their bearer token. They must save this bearer token, as it cannot be viewed again. \ No newline at end of file diff --git a/modules/red-hat-quay-builders-ui.adoc b/modules/red-hat-quay-builders-ui.adoc new file mode 100644 index 000000000..d78e21f57 --- /dev/null +++ b/modules/red-hat-quay-builders-ui.adoc @@ -0,0 +1,74 @@ +:_content-type: PROCEDURE +[id="creating-a-build-trigger"] += Creating a build trigger + +The following procedure sets up a _custom Git trigger_. A custom Git trigger is a generic way for any Git server to act as a _build trigger_. It relies solely on SSH keys and webhook endpoints. Creating a custom Git trigger is similar to the creation of any other trigger, with the exception of the following: + +ifeval::["{context}" == "quay-io"] +* {quayio} cannot automatically detect the proper Robot Account to use with the trigger. This must be done manually during the creation process. +endif::[] + +ifeval::["{context}" == "use-quay"] +* {productname} cannot automatically detect the proper Robot Account to use with the trigger. This must be done manually during the creation process. 
+endif::[] + +These steps can be replicated to create a _build trigger_ using Github, Gitlab, or Bitbucket, however, you must configure the credentials for these services in your `config.yaml` file. + +[NOTE] +==== +* If you want to use Github to create a _build trigger_, you must configure Github to be used with {productname} by creating an OAuth application. For more information, see "Creating an OAuth application Github". +==== + +ifeval::["{context}" == "quay-builders-image-automation"] +.Prerequisites + +* For {productname-ocp} deployments, you have configured your {ocp} environment for either link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/build/tmp/en-US/html-single/index#bare-metal-builds[bare metal builds] or link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/build/tmp/en-US/html-single/index#red-hat-quay-builders-enhancement[virtual builds]. +endif::[] + +.Procedure + +. Log in to your {productname} registry. + +. In the navigation pane, click *Repositories*. + +. Click *Create Repository*. + +. Click the *Builds* tab. + +. On the *Builds* page, click *Create Build Trigger*. + +. Select the desired platform, for example, *Github*, *Bitbucket*, *Gitlab*, or use a custom Git repository. For this example, click *Custom Git Repository Push*. + +. Enter a custom Git repository name, for example, `\git@github.com:/.git`. Then, click *Next*. + +. When prompted, configure the tagging options by selecting one of, or both of, the following options: ++ +* *Tag manifest with the branch or tag name*. When selecting this option, the built manifest the name of the branch or tag for the git commit are tagged. ++ +* *Add `latest` tag if on default branch*. When selecting this option, the built manifest with latest if the build occurred on the default branch for the repository are tagged. ++ +Optionally, you can add a custom tagging template. 
There are multiple tag templates that you can enter here, including short SHA IDs, timestamps, author names, committer names, and branch names from the commit as tags.
\ No newline at end of file diff --git a/modules/red-hat-quay-gcp-bucket-modify.adoc b/modules/red-hat-quay-gcp-bucket-modify.adoc new file mode 100644 index 000000000..1ea9f12ca --- /dev/null +++ b/modules/red-hat-quay-gcp-bucket-modify.adoc @@ -0,0 +1,87 @@ +:_content-type: PROCEDURE +[id="red-hat-quay-gcp-bucket-modify"] += Modifying your Google Cloud Platform object bucket + +[NOTE] +==== +Currently, modifying your Google Cloud Platform object bucket is not supported on IBM Power and IBM Z. +==== + +Use the following procedure to configure cross-origin resource sharing (CORS) for virtual builders. Without CORS configuration, uploading a build Dockerfile fails. + +.Procedure + +. Use the following reference to create a JSON file for your specific CORS needs. For example: ++ +[source,terminal] +---- +$ cat gcp_cors.json +---- ++ +.Example output ++ +[source,yaml] +---- +[ + { + "origin": ["*"], + "method": ["GET"], + "responseHeader": ["Authorization"], + "maxAgeSeconds": 3600 + }, + { + "origin": ["*"], + "method": ["PUT"], + "responseHeader": [ + "Content-Type", + "x-goog-acl", + "origin"], + "maxAgeSeconds": 3600 + } +] +---- + +. Enter the following command to update your GCP storage bucket: ++ +[source,terminal] +---- +$ gcloud storage buckets update gs:// --cors-file=./gcp_cors.json +---- ++ +.Example output ++ +[source,terminal] +---- +Updating + Completed 1 +---- + +. 
You can display the updated CORS configuration of your GCP bucket by running the following command: ++ +[source,terminal] +---- +$ gcloud storage buckets describe gs:// --format="default(cors)" +---- ++ +.Example output ++ +[source,yaml] +---- +cors: +- maxAgeSeconds: 3600 + method: + - GET + origin: + - '*' + responseHeader: + - Authorization +- maxAgeSeconds: 3600 + method: + - PUT + origin: + - '*' + responseHeader: + - Content-Type + - x-goog-acl + - origin +---- \ No newline at end of file diff --git a/modules/red-hat-quay-namespace-auto-pruning-overview.adoc b/modules/red-hat-quay-namespace-auto-pruning-overview.adoc new file mode 100644 index 000000000..5b255e504 --- /dev/null +++ b/modules/red-hat-quay-namespace-auto-pruning-overview.adoc @@ -0,0 +1,43 @@ +:_content-type: CONCEPT +[id="red-hat-quay-namespace-auto-pruning-overview"] += {productname} auto-pruning overview + +{productname} administrators can set up multiple auto-pruning policies on organizations and repositories; administrators can also set up auto-pruning policies at the registry level so that they apply to all organizations, including all newly created organizations. This feature allows for image tags to be automatically deleted within an organization or a repository based on specified criteria, which allows {productname} organization owners to stay below the storage quota by automatically pruning content. + +Currently, two policies have been added: + +* **Prune images by the number of tags**. For this policy, when the actual number of tags exceeds the desired number of tags, the oldest tags are deleted by their creation date until the desired number of tags is achieved. + +* **Prune image tags by creation date**. For this policy, any tags with a creation date older than the given time span, for example, 10 days, are deleted. 
After tags are automatically pruned, they go into the {productname} _time machine_, which is the window of time, after a tag is deleted, during which the tag remains accessible before it is garbage collected.
This provides more granular auto-pruning policies to target only certain image tags for removal. Consider the following when using _regular expressions_ with the auto-pruning feature: + +* _Regular expressions_ are optional. +* If a _regular expression_ is not provided, the auto-pruner defaults to pruning all image tags in the organization or the repository. These are user-supplied and must be protected against ReDOS attacks. +* Registry-wide policies do not currently support _regular expressions_. Only organization- and repository-level auto-pruning policies support _regular expressions_. +* _Regular expressions_ can be configured to prune images that either do, or _do not_, match the provided _regex_ pattern. + +Some of the following procedures provide example auto-pruning policies using _regular expressions_ that you can use as a reference when creating an auto-prune policy. diff --git a/modules/red-hat-quay-quota-management-configure-39.adoc b/modules/red-hat-quay-quota-management-configure-39.adoc new file mode 100644 index 000000000..0067aef52 --- /dev/null +++ b/modules/red-hat-quay-quota-management-configure-39.adoc @@ -0,0 +1,91 @@ +:_content-type: CONCEPT +[id="red-hat-quay-quota-management-39"] += Quota management for {productname} 3.9 + +If you are upgrading to {productname} 3.9, you must reconfigure the quota management feature. This is because with {productname} 3.9, calculation is done differently. As a result, totals prior to {productname} 3.9 are no longer valid. There are two methods for configuring quota management in {productname} 3.9, which are detailed in the following sections. + +[NOTE] +==== +* This is a one time calculation that must be done after you have upgraded to {productname} 3.9. +* Superuser privileges are required to create, update and delete quotas. While quotas can be set for users as well as organizations, you cannot reconfigure the _user_ quota using the {productname} UI and you must use the API instead. 
+==== + +[id="quota-management-configuring-38"] +== Option A: Configuring quota management for {productname} 3.9 by adjusting the QUOTA_TOTAL_DELAY feature flag + +Use the following procedure to recalculate {productname} 3.9 quota management by adjusting the `QUOTA_TOTAL_DELAY` feature flag. + +[NOTE] +==== +With this recalculation option, the totals appear as *0.00 KB* until the allotted time designated for `QUOTA_TOTAL_DELAY`. +==== + +.Prerequisites + +* You have upgraded to {productname} 3.9. +* You are logged into {productname} 3.9 as a superuser. + +.Procedure + +. Deploy {productname} 3.9 with the following `config.yaml` settings: ++ +[source,yaml] +---- +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_GARBAGE_COLLECTION: true +PERMANENTLY_DELETE_TAGS: true +QUOTA_TOTAL_DELAY_SECONDS: 1800 <1> +RESET_CHILD_MANIFEST_EXPIRATION: true +---- +<1> The `QUOTA_TOTAL_DELAY_SECONDS` flag defaults to `1800` seconds, or 30 minutes. This allows {productname} 3.9 to successfully deploy before the quota management feature begins calculating storage consumption for every blob that has been pushed. Setting this flag to a lower number might result in miscalculation; it *must* be set to a number that is greater than the time it takes your {productname} deployment to start. `1800` is the recommended setting, however larger deployments that take longer than 30 minutes to start might require a longer duration than `1800`. + +. Navigate to the {productname} UI and click the name of your Organization. + +. The *Total Quota Consumed* should read *0.00 KB*. Additionally, the *Backfill Queued* indicator should be present. + +. After the allotted time, for example, 30 minutes, refresh your {productname} deployment page and return to your Organization. Now, the *Total Quota Consumed* should be present. 
+ +[id="quota-management-configuring-39"] +== Option B: Configuring quota management for {productname} 3.9 by setting QUOTA_TOTAL_DELAY_SECONDS to 0 + +Use the following procedure to recalculate {productname} 3.9 quota management by setting `QUOTA_TOTAL_DELAY_SECONDS` to `0`. + +[NOTE] +==== +Using this option prevents the possibility of miscalculations, however is more time intensive. Use the following procedure for when your {productname} deployment swaps the `FEATURE_QUOTA_MANAGEMENT` parameter from `false` to `true`. Most users will find xref: +==== + +.Prerequisites + +* You have upgraded to {productname} 3.9. +* You are logged into {productname} 3.9 as a superuser. + +.Procedure + +. Deploy {productname} 3.9 with the following `config.yaml` settings: ++ +[source,yaml] +---- +FEATURE_GARBAGE_COLLECTION: true +FEATURE_QUOTA_MANAGEMENT: true +QUOTA_BACKFILL: false +QUOTA_TOTAL_DELAY_SECONDS: 0 +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. Navigate to the {productname} UI and click the name of your Organization. + +. The *Total Quota Consumed* should read *0.00 KB*. + +. Redeploy {productname} and set the `QUOTA_BACKFILL` flag set to `true`. For example: ++ +[source,yaml] +---- +QUOTA_BACKFILL: true +---- ++ +[NOTE] +==== +If you choose to disable quota management after it has calculated totals, {productname} marks those totals as stale. If you re-enable the quota management feature again in the future, those namespaces and repositories are recalculated by the backfill worker. 
+==== \ No newline at end of file diff --git a/modules/red-hat-quay-s3-bucket-modify.adoc b/modules/red-hat-quay-s3-bucket-modify.adoc new file mode 100644 index 000000000..f36736a06 --- /dev/null +++ b/modules/red-hat-quay-s3-bucket-modify.adoc @@ -0,0 +1,51 @@ +:_content-type: PROCEDURE +[id="red-hat-quay-s3-bucket-modify"] += Modifying your AWS S3 storage bucket + +If you are using AWS S3 storage, you must change your storage bucket in the AWS console prior to starting a _build_. + +.Procedure + +. Log in to your AWS console at link:https://s3.console.aws.amazon.com[s3.console.aws.com]. + +. In the search bar, search for `S3` and then click *S3*. + +. Click the name of your bucket, for example, `myawsbucket`. + +. Click the *Permissions* tab. + +. Under *Cross-origin resource sharing (CORS)*, include the following parameters: ++ +[source,yaml] +---- + [ + { + "AllowedHeaders": [ + "Authorization" + ], + "AllowedMethods": [ + "GET" + ], + "AllowedOrigins": [ + "*" + ], + "ExposeHeaders": [], + "MaxAgeSeconds": 3000 + }, + { + "AllowedHeaders": [ + "Content-Type", + "x-amz-acl", + "origin" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedOrigins": [ + "*" + ], + "ExposeHeaders": [], + "MaxAgeSeconds": 3000 + } + ] +---- \ No newline at end of file diff --git a/modules/ref_quay-integration-config-fields.adoc b/modules/ref_quay-integration-config-fields.adoc index 08456df00..055ef44be 100644 --- a/modules/ref_quay-integration-config-fields.adoc +++ b/modules/ref_quay-integration-config-fields.adoc @@ -1,6 +1,5 @@ :_content-type: REFERENCE -[[quay-integration-config-fields]] - +[id="quay-integration-config-fields"] = QuayIntegration configuration fields The following configuration fields are available for the QuayIntegration custom resource: diff --git a/modules/regenerating-robot-account-token-api.adoc b/modules/regenerating-robot-account-token-api.adoc new file mode 100644 index 000000000..be1d0aac7 --- /dev/null +++ 
b/modules/regenerating-robot-account-token-api.adoc @@ -0,0 +1,44 @@ +:_content-type: CONCEPT +[id="regenerating-robot-account-api"] += Regenerating a robot account token by using the {productname} API + +Use the following procedure to regenerate a robot account token using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* Enter the following command to regenerate a robot account token for an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#regenerateorgrobottoken[`POST /api/v1/organization/{orgname}/robots/{robot_shortname}/regenerate`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots//regenerate" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test-org+test", "created": "Fri, 10 May 2024 17:46:02 -0000", "last_accessed": null, "description": "", "token": ""} +---- + +* Enter the following command to regenerate a robot account token for the current user with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#regenerateuserrobottoken[`POST /api/v1/user/robots/{robot_shortname}/regenerate`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots//regenerate" +---- ++ +Example output ++ +[source,terminal] +---- +{"name": "quayadmin+test", "created": "Fri, 10 May 2024 14:12:11 -0000", "last_accessed": null, "description": "", "token": ""} +---- \ No newline at end of file diff --git a/modules/regenerating-robot-account-token-ui.adoc b/modules/regenerating-robot-account-token-ui.adoc new file 
mode 100644 index 000000000..1cd2537de --- /dev/null +++ b/modules/regenerating-robot-account-token-ui.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="regenerating-robot-account-ui"] += Regenerating a robot account token by using the {productname} UI + +Use the following procedure to regenerate a robot account token by using the {productname} UI. + +.Prerequisites + +* You have logged into {productname}. + +.Procedure + +. Click the name of an Organization. + +. In the navigation pane, click *Robot accounts*. + +. Click the name of your robot account, for example, *testorg3+test*. + +. Click *Regenerate token* in the popup box. \ No newline at end of file diff --git a/modules/registry-wide-access-management.adoc b/modules/registry-wide-access-management.adoc new file mode 100644 index 000000000..3288fd90b --- /dev/null +++ b/modules/registry-wide-access-management.adoc @@ -0,0 +1,10 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="registry-wide-access-management"] += Registry-wide access management + +The following sections provide information about adjusting registry-wide permissions for both users and superusers. \ No newline at end of file diff --git a/modules/repo-creation-management.adoc b/modules/repo-creation-management.adoc new file mode 100644 index 000000000..7f4548747 --- /dev/null +++ b/modules/repo-creation-management.adoc @@ -0,0 +1,122 @@ +[id="repo-creation-api"] += Creating and configuring repositories by using the {productname} API + +Repositories can be created, retrieved, changed, and deleted by using the {productname} API. + +.Procedure + +. 
Enter the following command to create a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepo[`POST /api/v1/repository`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "repository": "", + "visibility": "", + "description": "." + }' \ + "https://quay-server.example.com/api/v1/repository" +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "quayadmin", "name": "", "kind": "image"} +---- + +. You can list a repositories with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepos[`GET /api/v1/repository`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository?public=true&starred=false&namespace=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"repositories": [{"namespace": "quayadmin", "name": "busybox", "description": null, "is_public": false, "kind": "image", "state": "MIRROR", "is_starred": false, "quota_report": {"quota_bytes": 2280675, "configured_quota": 2199023255552}}]} +---- + +. Visibility can be changed from public to private with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepovisibility[`POST /api/v1/repository/{repository}/changevisibility`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository///changevisibility" +---- +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. 
You can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] command to return details about a repository: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http://quay-server.example.com/api/v1/error/not_found", "status": 404} +---- + +. Repository descriptions can be updated with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepo[`PUT /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "description": "This is an updated description for the repository." + }' \ + "https://quay-server.example.com/api/v1/repository//" +---- ++ +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. Enter the following command to delete a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleterepository[`DELETE /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +This command does not return output in the CLI. + +//// +. 
The link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepostate[`PUT /api/v1/repository/{repository}/changestate`] API endpoint can be used to change the state of the repository: ++ +[source,terminal] +---- + +---- ++ +.Example output ++ +[source,terminal] +---- + +---- +//// \ No newline at end of file diff --git a/modules/repo-manage-api.adoc b/modules/repo-manage-api.adoc new file mode 100644 index 000000000..c85dabf42 --- /dev/null +++ b/modules/repo-manage-api.adoc @@ -0,0 +1,4 @@ +[id="repo-manage-api"] += Creating and configuring repositories by using the {productname} API + +Repositories can be created, retrieved, changed, and deleted by using the {productname} API. \ No newline at end of file diff --git a/modules/repo-mirroring-troubleshooting-issues.adoc b/modules/repo-mirroring-troubleshooting-issues.adoc new file mode 100644 index 000000000..a421b5b56 --- /dev/null +++ b/modules/repo-mirroring-troubleshooting-issues.adoc @@ -0,0 +1,112 @@ +:_content-type: PROCEDURE +[id="repo-mirroring-troubleshooting-issues"] += Troubleshooting repository mirroring + +Use the following sections to troubleshoot repository mirroring for {productname}. + +//// +[id="reviewing-logs-repo-mirroring"] +== Reviewing the logs of your mirrored {productname} instances + +Use the following procedure to review the logs of your mirrored instances. + +.Prerequisites + +* You have enabled debug mode in your {productname} `config.yaml` file. + +.Procedure + +* Retrieve the logs from all running mirror pods. + +.. If you are using the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc logs mirror-pod +---- + +.. 
If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman logs repomirror-container +---- + + +[id="checking-mirror-configuration"] +== Checking the mirror configuration + +Use the following procedure to review the mirror configuration settings in your {productname} instances. + +.Procedure + +* Review your `config.yaml` settings. + +.. If you are using the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it quay-pod -- cat /conf/stack/config.yaml +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it quay-container cat /conf/stack/config.yaml +---- +//// + +[id="verifying-authentication-permissions"] +== Verifying authentication and permissions + +Ensure that the authentication credentials used for mirroring have the necessary permissions and access rights on both the source and destination {productname} instances. + +On the {productname} UI, check the following settings: + +* The access control settings. Ensure that the user or service account performing the mirroring operation has the required privileges. +* The permissions of your robot account on the {productname} registry. + +//// +[id="manual-copy"] +== Checking slow disk issues + +Repository mirroring uses `skopeo copy` as a background process. Test the time it takes to copy an image by manually running `skopeo copy`. This can help isolate any issues related to specific images or repositories and narrow down the troubleshooting scope. Additionally, it can help identify any network issues or bottlenecks that might be impacting the mirroring performance or causing failures. Pay attention to network latency, packet loss, or any unusual network patterns. + +Use the following procedure to time `skopeo copy`. 
+ +.Procedure + +* Enter the following command to measure the time it takes to perform `skopeo copy`: ++ +[source,terminal] +---- +$ time { skopeo copy docker://SOURCE_REGISTRY_IMAGE docker://DESTINATION_REGISTRY/REPOSITPRY/IMAGE:TAG } +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 4182b7568f06 skipped: already exists +Copying blob 4182b7568f06 skipped: already exists +Copying blob b7f76d1d9088 skipped: already exists +Copying blob ede3648667b7 skipped: already exists +Copying blob 021495d3c262 done +Copying blob 335fbccacdd3 done +Copying blob 4c70e3d931b6 done +Copying config d9f6ca2777 done +Writing manifest to image destination +Storing signatures + +real 6m19.291s +user 0m58.207s +sys 0m40.666s +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/7018078[Troubleshooting Quay Repository Mirroring]. +//// \ No newline at end of file diff --git a/modules/repo-permission-api.adoc b/modules/repo-permission-api.adoc new file mode 100644 index 000000000..6c5e007e8 --- /dev/null +++ b/modules/repo-permission-api.adoc @@ -0,0 +1,6 @@ +[id="repo-permission-api"] += Managing repository permissions by using the {productname} API + +Repository permissions can be managed by using the {productname} API. For example, you can create, view, and delete user and team permissions. + +The following procedures show you how to manage repository permissions by using the {productname} API. \ No newline at end of file diff --git a/modules/repo-policy-api.adoc b/modules/repo-policy-api.adoc new file mode 100644 index 000000000..08c0cb5bd --- /dev/null +++ b/modules/repo-policy-api.adoc @@ -0,0 +1,122 @@ +[id="policy-api"] += Managing auto-prune policies by using the {productname} API + +Auto-prune policies can be created, retrieved, changed, and delete for organizations, repositories, and users by using the {productname} API. + +.Procedure + +. 
Enter the following command to create a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepo[`POST /api/v1/repository`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "repository": "", + "visibility": "", + "description": "." + }' \ + "https://quay-server.example.com/api/v1/repository" +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "quayadmin", "name": "", "kind": "image"} +---- + +. You can list a repositories with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepos[`GET /api/v1/repository`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository?public=true&starred=false&namespace=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"repositories": [{"namespace": "quayadmin", "name": "busybox", "description": null, "is_public": false, "kind": "image", "state": "MIRROR", "is_starred": false, "quota_report": {"quota_bytes": 2280675, "configured_quota": 2199023255552}}]} +---- + +. Visibility can be changed from public to private with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepovisibility[`POST /api/v1/repository/{repository}/changevisibility`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository///changevisibility" +---- +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. 
You can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] command to return details about a repository: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http://quay-server.example.com/api/v1/error/not_found", "status": 404} +---- + +. Repository descriptions can be updated with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepo[`PUT /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "description": "This is an updated description for the repository." + }' \ + "https://quay-server.example.com/api/v1/repository//" +---- ++ +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. Enter the following command to delete a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleterepository[`DELETE /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +This command does not return output in the CLI. + +//// +. 
The link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepostate[`PUT /api/v1/repository/{repository}/changestate`] API endpoint can be used to change the state of the repository: ++ +[source,terminal] +---- + +---- ++ +.Example output ++ +[source,terminal] +---- + +---- +//// \ No newline at end of file diff --git a/modules/repository-events.adoc b/modules/repository-events.adoc new file mode 100644 index 000000000..db539c1ab --- /dev/null +++ b/modules/repository-events.adoc @@ -0,0 +1,307 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT + +[id="repository-events"] += Repository events description + +The following sections detail repository events. + +[discrete] +[id="repository-push"] +== Repository Push + +A successful push of one or more images was made to the repository: + +---- +{ + "name": "repository", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "homepage": "https://quay.io/repository/dgangaia/repository", + "updated_tags": [ + "latest" + ] +} +---- + +[discrete] +[id="dockerfile-build-queued"] +== Dockerfile Build Queued + +The following example is a response from a Dockerfile Build that has been queued into the Build system. + +[NOTE] +==== +Responses can differ based on the use of optional attributes. 
+==== + +---- +{ + "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "repo": "test", + "trigger_metadata": { + "default_branch": "master", + "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", + "ref": "refs/heads/master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", + "date": "2019-03-06T12:48:24+11:00", + "message": "adding 5", + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + }, + "committer": { + "username": "web-flow", + "url": "https://github.com/web-flow", + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" + } + } + }, + "is_manual": false, + "manual_user": null, + "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2" +} +---- + +[discrete] +[id="dockerfile-build-started"] +== Dockerfile Build started + +The following example is a response from a Dockerfile Build that has been queued into the Build system. + +[NOTE] +==== +Responses can differ based on the use of optional attributes. 
+==== + +---- +{ + "build_id": "a8cc247a-a662-4fee-8dcb-7d7e822b71ba", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "build_name": "50bc599", + "trigger_metadata": { //Optional + "commit": "50bc5996d4587fd4b2d8edc4af652d4cec293c42", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/50bc5996d4587fd4b2d8edc4af652d4cec293c42", + "date": "2019-03-06T14:10:14+11:00", + "message": "test build", + "committer": { //Optional + "username": "web-flow", + "url": "https://github.com/web-flow", //Optional + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional + }, + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/a8cc247a-a662-4fee-8dcb-7d7e822b71ba" +} +---- + +[discrete] +[id="dockerfile-build-successfully-completed"] +== Dockerfile Build successfully completed + +The following example is a response from a Dockerfile Build that has been successfully completed by the Build system. + +[NOTE] +==== +This event occurs simultaneously with a _Repository Push_ event for the built image or images. 
+==== + +---- +{ + "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "build_name": "b7f7d2b", + "image_id": "sha256:0339f178f26ae24930e9ad32751d6839015109eabdf1c25b3b0f2abf8934f6cb", + "trigger_metadata": { + "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", + "date": "2019-03-06T12:48:24+11:00", + "message": "adding 5", + "committer": { //Optional + "username": "web-flow", + "url": "https://github.com/web-flow", //Optional + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional + }, + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2", + "manifest_digests": [ + "quay.io/dgangaia/test@sha256:2a7af5265344cc3704d5d47c4604b1efcbd227a7a6a6ff73d6e4e08a27fd7d99", + "quay.io/dgangaia/test@sha256:569e7db1a867069835e8e97d50c96eccafde65f08ea3e0d5debaf16e2545d9d1" + ] +} +---- + +[discrete] +[id="dockerfile-build-failed"] +== Dockerfile Build failed + +The following example is a response from a Dockerfile Build that has failed. 
+ +---- +{ + "build_id": "5346a21d-3434-4764-85be-5be1296f293c", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "docker_url": "quay.io/dgangaia/test", + "error_message": "Could not find or parse Dockerfile: unknown instruction: GIT", + "namespace": "dgangaia", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "build_name": "6ae9a86", + "trigger_metadata": { //Optional + "commit": "6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", + "date": "2019-03-06T14:18:16+11:00", + "message": "failed build test", + "committer": { //Optional + "username": "web-flow", + "url": "https://github.com/web-flow", //Optional + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional + }, + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/5346a21d-3434-4764-85be-5be1296f293c" +} +---- + +[discrete] +[id="dockerfile-build-cancelled"] +== Dockerfile Build cancelled + +The following example is a response from a Dockerfile Build that has been cancelled. 
+ +---- +{ + "build_id": "cbd534c5-f1c0-4816-b4e3-55446b851e70", + "trigger_kind": "github", + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", + "docker_tags": [ + "master", + "latest" + ], + "build_name": "cbce83c", + "trigger_metadata": { + "commit": "cbce83c04bfb59734fc42a83aab738704ba7ec41", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { + "url": "https://github.com/dgangaia/test/commit/cbce83c04bfb59734fc42a83aab738704ba7ec41", + "date": "2019-03-06T14:27:53+11:00", + "message": "testing cancel build", + "committer": { + "username": "web-flow", + "url": "https://github.com/web-flow", + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" + }, + "author": { + "username": "dgangaia", + "url": "https://github.com/dgangaia", + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/cbd534c5-f1c0-4816-b4e3-55446b851e70" +} +---- + +ifeval::["{context}" == "use-quay"] + +[discrete] +[id="vulnerability-detected"] +== Vulnerability detected + +The following example is a response from a Dockerfile Build has detected a vulnerability in the repository. 
+ +---- +{ + "repository": "dgangaia/repository", + "namespace": "dgangaia", + "name": "repository", + "docker_url": "quay.io/dgangaia/repository", + "homepage": "https://quay.io/repository/dgangaia/repository", + + "tags": ["latest", "othertag"], + + "vulnerability": { + "id": "CVE-1234-5678", + "description": "This is a bad vulnerability", + "link": "http://url/to/vuln/info", + "priority": "Critical", + "has_fix": true + } +} +---- +endif::[] diff --git a/modules/resetting-superuser-password-on-operator.adoc b/modules/resetting-superuser-password-on-operator.adoc new file mode 100644 index 000000000..bb07175fa --- /dev/null +++ b/modules/resetting-superuser-password-on-operator.adoc @@ -0,0 +1,82 @@ +:_content-type: CONCEPT +[id="resetting-superuser-password-on-operator"] += Resetting superuser passwords on the {productname} Operator + +.Prerequisites + +* You have created a {productname} superuser. +* You have installed Python 3.9. +* You have installed the `pip` package manager for Python. +* You have installed the `bcrypt` package for `pip`. + +.Procedure + +. Log in to your {productname} deployment. + +. On the {ocp} UI, navigate to *Workloads* -> *Secrets*. + +. Select the namespace for your {productname} deployment, for example, `Project quay`. + +. Locate and store the PostgreSQL database credentials. + +. Generate a secure, hashed password using the `bcrypt` package in Python 3.9 by entering the following command: ++ +[source,terminal] +---- +$ python3.9 -c 'import bcrypt; print(bcrypt.hashpw(b"newpass1234", bcrypt.gensalt(12)).decode("utf-8"))' +---- ++ +.Example output ++ +[source,terminal] +---- +$2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y +---- + +. On the CLI, log in to the database, for example: ++ +[source,terminal] +---- +$ oc rsh quayuser-quay-quay-database-669c8998f-v9qsl +---- + +. 
Enter the following command to open a connection to the `quay` PostgreSQL database server, specifying the database, username, and host address: ++ +[source,terminal] +---- +sh-4.4$ psql -U quayuser-quay-quay-database -d quayuser-quay-quay-database -W +---- + +. Enter the following command to connect to the default database for the current user: ++ +[source,terminal] +---- +quay=> \c +---- + +. Update the `password_hash` of the superuser admin who lost their password: ++ +[source,terminal] +---- +quay=> UPDATE public.user SET password_hash = '$2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y' where username = 'quayadmin'; +---- + +. Enter the following to command to ensure that the `password_hash` has been updated: ++ +[source,terminal] +---- +quay=> select * from public.user; +---- ++ +.Example output ++ +[source,terminal] +---- +id | uuid | username | password_hash | email | verified | stripe_id | organization | robot | invoice_email | invalid_login_attempts | last_invalid_login |removed_tag_expiration_s | enabled | invoice_email_address | company | family_name | given_name | location | maximum_queued_builds_count | creation_date | last_accessed +----+--------------------------------------+-----------+--------------------------------------------------------------+-----------------------+--- +-------+-----------+--------------+-------+---------------+------------------------+----------------------------+--------------------------+------ +---+-----------------------+---------+-------------+------------+----------+-----------------------------+----------------------------+----------- +1 | 73f04ef6-19ba-41d3-b14d-f2f1eed94a4a | quayadmin | $2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y | quayadmin@example.com | t | | f | f | f | 0 | 2023-02-23 07:54:39.116485 | 1209600 | t | | | | | | | 2023-02-23 07:54:39.116492 +---- + +. Navigate to your {productname} UI on {ocp} and log in using the new credentials. 
diff --git a/modules/resource-demand-failed-operator.adoc b/modules/resource-demand-failed-operator.adoc new file mode 100644 index 000000000..56bf0fab4 --- /dev/null +++ b/modules/resource-demand-failed-operator.adoc @@ -0,0 +1,56 @@ +:_content-type: CONCEPT +[id="resource-demand-failed-operator"] += How can I handle failed {productname} Operator deployments caused by resource demand? + +The {productname} Operator deploys the following pods with default resource requests shown below. Default resource requests can be too large for smaller clusters, and might cause issues during rolling updates or even initial rollout. + +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +quay-operator.v3.6.2-d88c4f74b-7s8t7 1/1 Running 0 4m22s +subquay-clair-app-79f96d69dc-j7dzh 1/1 Running 0 2m35s +subquay-clair-app-79f96d69dc-n9svj 1/1 Running 0 2m3s +subquay-clair-postgres-cc4fdf4b7-hjv9m 1/1 Running 0 2m51s +subquay-quay-app-766f64b84d-grkqv 1/1 Running 0 2m35s +subquay-quay-app-766f64b84d-m4bps 1/1 Running 0 2m35s +subquay-quay-app-upgrade-wp9vd 0/1 Completed 0 2m44s +subquay-quay-config-editor-6c84649df8-v2zhz 1/1 Running 0 2m35s +subquay-quay-database-78bf9dd579-gjfvm 1/1 Running 0 2m33s +subquay-quay-mirror-b9c7657b6-7tptr 1/1 Running 0 2m11s +subquay-quay-mirror-b9c7657b6-phcfh 1/1 Running 0 2m11s +subquay-quay-postgres-init-lp8fv 0/1 Completed 0 2m36s +subquay-quay-redis-6c65bdc497-hsgfg 1/1 Running 0 3m31s + +1.clair-app Requests x 2 (instances): + cpu: 2 + memory: 2Gi +2. clair-postgres : +Requests: + cpu: 500m + memory: 2Gi +3. quay.app x 2 (instances) : +Requests: + cpu: 2 + memory: 8Gi +4. quay-database : +Requests: + cpu: 500m + memory: 2Gi +5. quay-mirror x 2 (instances): +Requests: + cpu: 500m + memory: 512Mi +6. 
redis +Requests: + cpu: 500m + memory: 1Gi +---- + +Resource limitation and requests cannot be lowered, however, you can disable the `horizontalpodautoscaling` components in the `QuayRegistry` custom resource definition (CRD) and use the `override` feature to set the replica count to `1`. This lowers the required resources. + +[NOTE] +==== +Using a single replica is prone to cause registry outages because the pod might get restarted during updates, {productname} configuration updates, node maintenance events, or unexpected node downtime. +==== + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#scale_down_your_red_hat_quay_deployment[Scaling down your {productname} deployment]. diff --git a/modules/restoring-red-hat-quay-standalone.adoc b/modules/restoring-red-hat-quay-standalone.adoc index 9d53acda0..b98d6ceb5 100644 --- a/modules/restoring-red-hat-quay-standalone.adoc +++ b/modules/restoring-red-hat-quay-standalone.adoc @@ -164,6 +164,7 @@ DISTRIBUTED_STORAGE_CONFIG: - s3_bucket: storage_path: /registry s3_access_key: + s3_region: s3_secret_key: host: ---- @@ -234,5 +235,6 @@ DISTRIBUTED_STORAGE_CONFIG: storage_path: /registry s3_access_key: s3_secret_key: + s3_region: host: ---- \ No newline at end of file diff --git a/modules/restoring-red-hat-quay.adoc b/modules/restoring-red-hat-quay.adoc index a78db2e3d..281ff05ca 100644 --- a/modules/restoring-red-hat-quay.adoc +++ b/modules/restoring-red-hat-quay.adoc @@ -1,38 +1,41 @@ -[[restoring-up-red-hat-quay]] +:_content-type: PROCEDURE +[id="restoring-up-red-hat-quay"] = Restoring {productname} -This procedure is used to restore {productname} when the {productname} Operator manages the database. It should be performed after a backup of your {productname} registry has been performed. See xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] for more information. 
- +Use the following procedures to restore {productname} when the {productname} Operator manages the database. It should be performed after a backup of your {productname} registry has been performed. See xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] for more information. .Prerequisites -* {productname} is deployed on OpenShift Container Platform using the {productname} Operator. +* {productname} is deployed on {ocp} using the {productname} Operator. * A backup of the {productname} configuration managed by the {productname} Operator has been created following the instructions in the xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] section * Your {productname} database has been backed up. * The object storage bucket used by {productname} has been backed up. * The components `quay`, `postgres` and `objectstorage` are set to `managed: true` -* If the component `clair` is set to `managed: true`, the component `clairpostgres` is also set to `managed: true` (starting with {productname} Operator v3.7 or later) -* There is no running {productname} deployment managed by the {productname} Operator in the target namespace on your OpenShift Container Platform cluster +* If the component `clair` is set to `managed: true`, the component `clairpostgres` is also set to `managed: true` (starting with {productname} v3.7 or later) +* There is no running {productname} deployment managed by the {productname} Operator in the target namespace on your {ocp} cluster [NOTE] ==== -If your deployment contains partially unmanaged database or storage components and you are using external services for Postgres or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to restore their data from a backup prior to restore {productname} +If your deployment contains partially unmanaged database or storage components and 
you are using external services for PostgreSQL or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to restore their data from a backup prior to restore {productname} ==== +[id="restoring-quay-and-configuration-from-backup"] == Restoring {productname} and its configuration from a backup +Use the following procedure to restore {productname} and its configuration files from a backup. + [NOTE] ==== These instructions assume you have followed the process in the xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] guide and create the backup files with the same names. ==== -. Restore the backed up {productname} configuration and the generated keys from the backup: +.Procedure + +. Restore the backed up {productname} configuration by entering the following command: + [source,terminal] ---- $ oc create -f ./config-bundle.yaml - -$ oc create -f ./managed-secret-keys.yaml ---- + [IMPORTANT] @@ -40,6 +43,13 @@ $ oc create -f ./managed-secret-keys.yaml If you receive the error `Error from server (AlreadyExists): error when creating "./config-bundle.yaml": secrets "config-bundle-secret" already exists`, you must delete your existing resource with `$ oc delete Secret config-bundle-secret -n ` and recreate it with `$ oc create -f ./config-bundle.yaml`. ==== +. Restore the generated keys from the backup by entering the following command: ++ +[source,terminal] +---- +$ oc create -f ./managed-secret-keys.yaml +---- + . Restore the `QuayRegistry` custom resource: + [source,terminal] @@ -54,9 +64,16 @@ $ oc create -f ./quay-registry.yaml $ oc wait quayregistry registry --for=condition=Available=true -n ---- -== Scale down your {productname} deployment +[id="scale-down-quay-deployment"] +== Scaling down your {productname} deployment -. 
*For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for Quay, mirror workers and Clair (if managed). Your `QuayRegistry` resource should look similar to the following: +Use the following procedure to scale down your {productname} deployment. + +.Procedure + +. Depending on the version of your {productname} deployment, scale down your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for Quay, mirror workers and Clair (if managed). Your `QuayRegistry` resource should look similar to the following: + [source,yaml] ---- @@ -87,18 +104,29 @@ spec: <1> Disable auto scaling of Quay, Clair and Mirroring workers <2> Set the replica count to 0 for components accessing the database and objectstorage -. *For Operator version 3.6 and earlier:* Scale down the {productname} deployment by scaling down the {productname} Operator first and then the managed {productname} resources: +.. *For Operator version 3.6 and earlier:* Scale down the {productname} deployment by scaling down the {productname} registry first and then the managed {productname} resources: + [source,terminal] ---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/^quay-operator/ {print $1}') -n - +---- ++ +[source,terminal] +---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-app/ {print $1}') -n +---- ++ +[source,terminal] +---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-mirror/ {print $1}') -n +---- ++ +[source,terminal] +---- $ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/clair-app/ {print $1}') -n ---- -. Wait for the `registry-quay-app`, `registry-quay-mirror` and `registry-clair-app` pods (depending on which components you set to be managed by Operator) to disappear. 
You can check their status by running the following command: +. Wait for the `registry-quay-app`, `registry-quay-mirror` and `registry-clair-app` pods (depending on which components you set to be managed by {productname} Operator) to disappear. You can check their status by running the following command: + [source,terminal] ---- @@ -114,9 +142,14 @@ registry-quay-database-66969cd859-n2ssm 1/1 Running 0 registry-quay-redis-7cc5f6c977-956g8 1/1 Running 0 5d21h ---- -== Restore your {productname} database +[id="restoring-quay-database"] +== Restoring your {productname} database -. Identify your Quay database pod: +Use the following procedure to restore your {productname} database. + +.Procedure + +. Identify your `Quay` database pod by entering the following command: + [source,terminal] ---- @@ -135,14 +168,14 @@ quayregistry-quay-database-59f54bb7-58xs7 $ oc cp ./backup.sql -n registry-quay-database-66969cd859-n2ssm:/tmp/backup.sql ---- -. Open a remote terminal to the database: +. Open a remote terminal to the database by entering the following command: + [source,terminal] ---- $ oc rsh -n registry-quay-database-66969cd859-n2ssm ---- -. Enter psql: +. Enter psql by running the following command: + [source,terminal] ---- @@ -155,30 +188,34 @@ bash-4.4$ psql postgres=# \l ---- + -Example output: +.Example output + [source,terminal] +---- List of databases Name | Owner | Encoding | Collate | Ctype | Access privileges ----------------------------+----------------------------+----------+------------+------------+----------------------- postgres | postgres | UTF8 | en_US.utf8 | en_US.utf8 | quayregistry-quay-database | quayregistry-quay-database | UTF8 | en_US.utf8 | en_US.utf8 | +---- - -. Drop the database: +. Drop the database by entering the following command: + +[source,terminal] ---- postgres=# DROP DATABASE "quayregistry-quay-database"; ---- + -Example output: +.Example output + +[source,terminal] ---- DROP DATABASE ---- . 
Exit the postgres CLI to re-enter bash-4.4: + +[source,terminal] ---- \q ---- @@ -190,22 +227,28 @@ DROP DATABASE sh-4.4$ psql < /tmp/backup.sql ---- -. Exit bash: +. Exit bash by entering the following command: + +[source,terminal] ---- sh-4.4$ exit ---- +[id="restoring-quay-object-storage-data"] == Restore your {productname} object storage data -. Export the `AWS_ACCESS_KEY_ID`: +Use the following procedure to restore your {productname} object storage data. + +.Procedure + +. Export the `AWS_ACCESS_KEY_ID` by entering the following command: + [source,terminal] ---- $ export AWS_ACCESS_KEY_ID=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_ACCESS_KEY_ID}' |base64 -d) ---- -. Export the `AWS_SECRET_ACCESS_KEY`: +. Export the `AWS_SECRET_ACCESS_KEY` by entering the following command: + [source,terminal] ---- @@ -224,9 +267,12 @@ $ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift- You can also use link:https://rclone.org/[rclone] or link:https://s3tools.org/s3cmd[sc3md] instead of the AWS command line utility. ==== -== Scale up your {productname} deployment +[id="scaling-up-quay"] +== Scaling up your {productname} deployment -. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers and Clair as applicable. Your `QuayRegistry` resource should look similar to the following: +. Depending on the version of your {productname} deployment, scale up your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers and Clair as applicable. 
Your `QuayRegistry` resource should look similar to the following: + [source,yaml] ---- @@ -251,7 +297,7 @@ spec: <1> Re-enables auto scaling of {productname}, Clair and mirroring workers again (if desired) <2> Replica overrides are removed again to scale the {productname} components back up -. *For Operator version 3.6 and earlier:* Scale up the {productname} deployment by scaling up the {productname} Operator again: +.. *For Operator version 3.6 and earlier:* Scale up the {productname} deployment by scaling up the {productname} registry again: + [source,terminal] ---- @@ -285,4 +331,4 @@ status: reason: HealthChecksPassing status: 'True' type: Available ----- +---- \ No newline at end of file diff --git a/modules/retrieving-build-info-superuser-api.adoc b/modules/retrieving-build-info-superuser-api.adoc new file mode 100644 index 000000000..09ac7aad7 --- /dev/null +++ b/modules/retrieving-build-info-superuser-api.adoc @@ -0,0 +1,31 @@ +[id="retrieving-build-info-superuser-api"] += Retrieving build information with the {productname} API + +As a superuser, you can retrieve information about builds with the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepobuildsuperuser[`GET /api/v1/superuser/{build_uuid}/build`] endpoint to return information about a build: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//build" \ + -H "Authorization: Bearer " +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepobuildstatussuperuser[`GET /api/v1/superuser/{build_uuid}/status`] API endpoint to return the status for the builds specified by the build uuids: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//status" \ + -H "Authorization: Bearer " +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepobuildlogssuperuser[`GET /api/v1/superuser/{build_uuid}/logs`] API endpoint to return the build logs for the build specified by the build uuid: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//logs" \ + -H "Authorization: Bearer " +---- + diff --git a/modules/reverting-tag-changes-api.adoc b/modules/reverting-tag-changes-api.adoc new file mode 100644 index 000000000..0fb862e7c --- /dev/null +++ b/modules/reverting-tag-changes-api.adoc @@ -0,0 +1,55 @@ +:_content-type: CONCEPT +[id="reverting-tag-changes-api"] += Reverting tag changes by using the API + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive _time machine_ feature that allows older images tags to remain in the repository for set periods of time so that they can revert changes made to tags. This feature allows users to revert tag changes, like tag deletions. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. You can restore a repository tag to its previous image by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#restoreta[`POST /api/v1/repository/{repository}/tag/{tag}/restore`] command. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": + }' \ + quay-server.example.com/api/v1/repository/quayadmin/busybox/tag/test/restore +---- ++ +.Example output ++ +[source,terminal] +---- +{} +---- + +. 
To see a list of tags after restoring an old tag you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": false, "start_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715697708, "end_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:41:48 -0000", "expiration": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715695488, "end_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "expiration": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715631517, "end_ts": 1715695488, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Mon, 13 May 2024 20:18:37 -0000", "expiration": "Tue, 14 May 2024 14:04:48 -0000"}], "page": 1, "has_additional": false} +---- \ No 
newline at end of file diff --git a/modules/reverting-tag-changes.adoc b/modules/reverting-tag-changes.adoc new file mode 100644 index 000000000..af9d53a84 --- /dev/null +++ b/modules/reverting-tag-changes.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="reverting-tag-changes"] += Reverting tag changes by using the UI + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive _time machine_ feature that allows older images tags to remain in the repository for set periods of time so that they can revert changes made to tags. This feature allows users to revert tag changes, like tag deletions. + +.Procedure + +. On the *Repositories* page of the v2 UI, click the name of the image you want to revert. + +. Click the *Tag History* tab. + +. Find the point in the timeline at which image tags were changed or removed. Next, click the option under *Revert* to restore a tag to its image. \ No newline at end of file diff --git a/modules/rn_3_10_0.adoc b/modules/rn_3_10_0.adoc new file mode 100644 index 000000000..6e11201b8 --- /dev/null +++ b/modules/rn_3_10_0.adoc @@ -0,0 +1,365 @@ +:_content-type: CONCEPT +[id="release-notes-310"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-10-3"] +== RHBA-2024:0382 - {productname} 3.10.3 release + +Issued 2024-01-31 + +{productname} release 3.10.3 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:0382[RHBA-2024:0382] advisory. + +[id="bug-fixes-310-3"] +=== {productname} 3.10.3 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4849[PROJQUAY-4849]. Previously, the exporter failed to update the lifetime end of child manifests in the main manifest lists. 
Consequently, this led to exceptions when attempting to pull Docker images by tag after the tag was removed from the database due to garbage collection. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-6007[PROJQUAY-6007]. Previously, the Operator would attempt to create a temporary fake route to check if the cluster supported the _Route_ API. This check was unable to be conducted when the route and TLS component were marked as unmanaged because these components are supposed to be managed manually by the user. This issue has been resolved. + +[id="rn-3-10-2"] +== RHBA-2024:0102 - {productname} 3.10.2 release + +Issued 2024-01-16 + +{productname} release 3.10.2 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:0102[RHBA-2024:0102] advisory. + +[id="new-features-310-2"] +=== {productname} 3.10.2 new features + +With this release, IBM Cloud object storage is now supported. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-ibmcloudstorage[IBM Cloud Object Storage]. + +[id="bug-fixes-310-2"] +=== {productname} 3.10.2 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-2679[PROJQUAY-2679] +* link:https://issues.redhat.com/browse/PROJQUAY-6549[PROJQUAY-6549] + +[id="known-issues-310-2"] +=== Known issues + +* A known issue was discovered when using naming conventions with the following words for repository names: ++ +`build` +`trigger` +`tag` ++ +When these words are used for repository names, users are unable to access the repository, and are unable to permanently delete the repository. Attempting to delete these repositories returns the following error: `Failed to delete repository , HTTP404 - Not Found.` ++ +There is no workaround for this issue. 
Users should not use `build`, `trigger`, or `tag` in their repository names. + +[id="rn-3-10-1"] +== RHBA-2023:7819 - {productname} 3.10.1 release + +Issued 2023-12-14 + +{productname} release 3.10.1 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:7819[RHBA-2023:7819] advisory. + +[id="bug-fixes-310-1"] +=== {productname} 3.10.1 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-5452[PROJQUAY-5452] - Breadcrumbs incorrect when visiting a direct link +* link:https://issues.redhat.com/browse/PROJQUAY-6333[PROJQUAY-6333] - [New UI] The user in the team which has "member" or "creator" role can't see the "Teams and Membership" tab +* link:https://issues.redhat.com/browse/PROJQUAY-6336[PROJQUAY-6336] - Quay 3.10 new UI can't add normal user to quay new team during Create team wizard +* link:https://issues.redhat.com/browse/PROJQUAY-6369[PROJQUAY-6369] - The search input box doesn't work in permanently delete default permissions wizard of new UI + +[id="rn-3-10-0"] +== RHBA-2023:7341 - {productname} 3.10.0 release + +Issued 2023-11-28 + +{productname} release {producty} is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHSA-2023:7341[RHSA-2023:7341] and link:https://access.redhat.com/errata/RHSA-2023:7575[RHSA-2023:7575] advisories. + +[id="release-cadence-310"] +== {productname} release cadence + +With the release of {productname} 3.10, the product has begun to align its release cadence and lifecycle with {ocp}. As a result, {productname} releases are now generally available (GA) within approximately four weeks of the most recent version of {ocp}. Customers can now expect the support lifecycle phases of {productname} to align with {ocp} releases. 
+ +For more information, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="new-features-and-enhancements-310"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. + +[id="ibm-power-z-linuxone-support"] +=== IBM Power, IBM Z, IBM® LinuxONE support + +With this release, IBM Power (ppc64le), IBM Z (s390x), and IBM® LinuxONE (s390x) architectures are supported. + +[id="namespace-auto-pruning-310-rn"] +=== Namespace auto-pruning + +With {productname} 3.10, {productname} administrators can set up auto-pruning policies on namespaces (both users and organization). This feature allows for image tags to be automatically deleted within a namespace based on specified criteria. For this release, two policies have been added: + +* Auto-pruning images based on the number of tags. +* Auto-pruning based on the age of a tag. + +The auto-pruning feature allows {productname} organization owners to stay below the storage quota by automatically pruning content based on one of the aforementioned policies. + +For more information about implementing this feature, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} namespace auto-pruning overview]. + +[id="v2-ui-enhancements-310"] +=== {productname} v2 UI enhancements + +In {productname} 3.8, a new UI was introduced as a technology preview feature. With {productname} 3.10, the following enhancements have been made to the v2 UI: + +* With this update, a *Settings* page has been added for {productname} organizations. {productname} administrators can edit their preferences, billing information, and set organization types from this page. + +* With this update, a *Settings* page has been added for {productname} repositories. 
This page must be enabled by setting `FEATURE_UI_V2_REPO_SETTINGS` to `true` in your `config.yaml` file. This page allows users to create and set robot permissions, create events and notifications, set repository visibility, and delete repositories. + +* With this update, bulk managing robot account repository access is available on the {productname} v2 UI. Users can now easily add a robot account to multiple repositories using the v2 UI. + +* With this update, the default user repository, or namespace, now includes a *Robot accounts* tab. This allows users to easily create their own robot accounts. + +* With this update, the following alert messages have been added to confirm either the creation, or failure, of robot accounts and permission updates: + +** *Successfully updated repository permission* +** *Successfully created robot account with robot name: + * ++ +Alternatively, you can receive the following error if you try to create a robot account with the same name as another: *Error creating robot account* +** *Successfully deleted robot account* + +* With this update, a *Teams and membership* page has been added to the v2 UI. {productname} administrators can perform the following actions from this page: + +** Create new teams +** Manage or create new team members +** Set repository permissions +** Search for specific teams +** View teams, members of a team, or collaborators of a team + +* With this update, a *Default permissions* page has been added to the v2 UI. This page allows {productname} administrators to set repository permissions. + +* With this update, a *Tag History* page has been added to the v2 UI. Additionally, {productname} administrators can add and manage labels for repositories, and set expiration dates for specified tags in a repository. 
+ +For more information about navigating the v2 UI and enabling, or using, these features, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#using-v2-ui[Using the {productname} v2 UI]. + +[id="clair-gc-manifests"] +=== Garbage collection of manifests for Clair + +Previously, Clair's indexer database was continually growing as it added storage when new manifests and layers were uploaded. This could cause the following issues for {productname} deployments: + +* Increased storage requirements +* Performance issues +* Increased storage management burden, requiring that administrators would monitor usage and develop a scaling strategy + +With this update, a new configuration field, `SECURITY_SCANNER_V4_MANIFEST_CLEANUP`, has been added. When this field is set to `true`, the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. As a result, manifest reports are removed from Clair's database. + +[id="managing-robot-accounts-quay"] +=== Managing {productname} robot accounts + +Prior to {productname} {producty}, all users were able to create robot accounts with unrestricted access. With this release, {productname} administrators can manage robot accounts by disallowing users to create new robot accounts. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] + +[id="new-quay-config-fields-310"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. + +[id="clair-gc-manifests-config-field"] +=== Clair garbage collection of manifests configuration field + +* **SECURITY_SCANNER_V4_MANIFEST_CLEANUP**. When set to `true` the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. 
++ +*Default*: `True` + +[id="disabling-robot-account-config-field"] +=== Disabling robot accounts configuration field + +* **ROBOTS_DISALLOW**: When set to `true`, robot accounts are prevented from all interactions, as well as from being created ++ +*Default*: `False` + +[id="namespace-auto-pruning-config-fields"] +=== Namespace auto-pruning configuration field + +The following configuration fields have been added for the auto-pruning feature: + +** **FEATURE_AUTO_PRUNE**: When set to `True`, enables functionality related to the auto-pruning of tags. ++ +*Default:* `False` + +[id="v2-ui-repo-settings-config-field"] +=== {productname} v2 UI repository settings configuration field + +* **FEATURE_UI_V2_REPO_SETTINGS**: When set to `True`, enables repository settings in the {productname} v2 UI. ++ +*Default:* `False` + +[id="quay-operator-updates-310"] +== {productname} Operator + +The following updates have been made to the {productname} Operator: + +* The config editor has been removed from the {productname} Operator on {ocp} deployments. As a result, the `quay-config-editor` pod no longer deploys, and users cannot check the status of the config editor route. Additionally, the Config Editor Endpoint no longer generates on the {productname} Operator *Details* page. ++ +Users with existing {productname} Operators who are upgrading from 3.7, 3.8, or 3.9 to {producty} must manually remove the {productname} config editor by removing the `deployment`, `route,` `service`, and `secret` objects. For information about this procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/upgrade_red_hat_quay/operator-upgrade#config-editor-removal[Removing config editor objects on {productname} Operator]. ++ +By default, the config editor was deployed for every `QuayRegistry` instance, which made it difficult to establish an audit trail over the registry's configuration. 
Anyone with access to the namespace, config editor secret, and config editor route could use the editor to make changes to {productname}'s configuration, and their identity was not logged in the system. Removing the config editor forces all changes through the config bundle property of the `QuayRegistry` resource, which points to a secret, which is then subject to native Kubernetes auditing and logging. + +[id="known-issues-and-limitations-310"] +== {productname} 3.10 known issues and limitations + +The following sections note known issues and limitations for {productname} {producty}. + +[id="known-issues-310"] +=== {productname} 3.10 known issues + +* There is a known issue with the auto-pruning feature when pushing image tags with Cosign signatures. In some scenarios, for example, when each image tag uses a different Cosign key, the auto-pruner worker removes the image signature and only keeps the image tag. This occurs because {productname} considers image tags and the signature as two tags. The expected behavior of this feature is that the auto-pruner should consider the image tag and signature as one item, calculate only the image tag, and when the auto-pruner worker is configured in such a way that the tag is pruned, it also prunes the signature. This will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6380[*PROJQUAY-6380*]) + +* Currently, auditing for auto-pruning policy operations, including creating, updating, or deleting policies, is unavailable. This is a known issue and will be fixed in a future release of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6229[*PROJQUAY-6229*]) + +* Currently, the auto-pruning worker prunes `ReadOnly` and mirror repositories, in addition to normal repositories. `ReadOnly` and mirror repositories should not be pruned automatically. This is a known issue and will be fixed in a future version of {productname}. 
(link:https://issues.redhat.com/browse/PROJQUAY-6235[*PROJQUAY-6235*]) + +* When upgrading the {productname} Operator from versions 3.7, 3.8, or 3.9 to {producty}, users must manually remove the {productname} config editor by removing the `deployment`, `route`, `service`, and `secret` objects. For information about this procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#config-editor-removal[Removing config editor objects on {productname} Operator]. + +* When creating a new team using the {productname} v2 UI, users are unable to add normal users to the new team. This only occurs while setting up the new team. As a workaround, you can add users after the team has been created. Robot accounts are unaffected by this issue. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6336[*PROJQUAY-6336*]) + +* Sometimes, when creating a new default permission setting, the *Create default permission* button is disabled. As a workaround, you can try adjusting the *Applied to* setting in the *Create default permission* wizard. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6341[*PROJQUAY-6341*]) + +[id="limitations-310"] +=== {productname} 3.10 limitations + +* In this release, the following features are not supported on IBM Power (ppc64le) and IBM Z (s390x): +** Geo-Replication +** IPv6 Single stack/ Dual Stack +** Mirror registry +** Quay config editor - Mirror, MAG, Kinesis, Keystone, GitHub Enterprise, OIDC +** RedHat Quay V2 User Interface +** Deploy Red Hat Quay - High Availability is supported but the following is not: +*** Backing up and restoring on a standalone deployment +*** Migrating a standalone to operator deployment + +* Robot accounts are mandatory for repository mirroring. 
Setting the `ROBOTS_DISALLOW` configuration field to `true` breaks mirroring configurations. This will be fixed in a future version of {productname} + +//// + +Additionally, {productname} administrators can add robot accounts to allowlists when disallowing the creation of new robot accounts. This ensures operability of approved robot accounts. + +* Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` without allowlisting supplementary robot accounts will break mirroring configurations. This will be fixed in a future version of {productname} + +You must allowlist robot accounts with the `ROBOTS_WHITELIST` variable when managing robot accounts with the `ROBOTS_DISALLOW` field. Use the following reference when managing robot accounts: ++ +[source,yaml] +---- +ROBOTS_DISALLOW: true +ROBOTS_WHITELIST: + - quayadmin+robot1 + - quayadmin+robot2 + - quayadmin+robot3 +---- ++ +For more information, see. . . +//// + +[id="bug-fixes-310"] +== {productname} bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-6184[*PROJQUAY-6184*]. Add missing props for Create robot account modal +* link:https://issues.redhat.com/browse/PROJQUAY-6048[*PROJQUAY-6048*]. Poor UI performance with quotas enabled +* link:https://issues.redhat.com/browse/PROJQUAY-6010[*PROJQUAY-6010*]. Registry quota total worker fails to start due to import +* link:https://issues.redhat.com/browse/PROJQUAY-5212[*PROJQUAY-5212*]. Quay 3.8.1 can't mirror OCI images from Docker Hub +* link:https://issues.redhat.com/browse/PROJQUAY-2462[*PROJQUAY-2462*]. Consider changing the type of the removed_tag_expiration_s from integer to bigint +* link:https://issues.redhat.com/browse/PROJQUAY-2803[*PROJQUAY-2803*]. Quay should notify Clair when manifests are garbage collected +* link:https://issues.redhat.com/browse/PROJQUAY-5598[*PROJQUAY-5598*]. 
Log auditing tries to write to the database in read-only mode +* link:https://issues.redhat.com/browse/PROJQUAY-4126[*PROJQUAY-4126*]. Clair database growing +* link:https://issues.redhat.com/browse/PROJQUAY-5489[*PROJQUAY-5489*]. Pushing an artifact to Quay with oras binary results in a 502 +* link:https://issues.redhat.com/browse/PROJQUAY-3906[*PROJQUAY-3906*]. Quay can see the push image on Console after push image get error "Quota has been exceeded on namespace" + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. 
+ +.Technology Preview tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.10 | Quay 3.9 | Quay 3.8 + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} namespace auto-pruning overview] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#operator-georepl-site-removal[Single site geo-replication removal] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk log forwarding] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-nutanix[Nutanix Object Storage] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#proc_manage-ipv6-dual-stack[FEATURE_LISTEN_IP_VERSION] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-super-users-enabling[LDAP_SUPERUSER_FILTER] +|General Availability +|General Availability +|General Availability + +| 
link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-restricted-users-enabling[LDAP_RESTRICTED_USER_FILTER] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-superusers-full-access[FEATURE_SUPERUSERS_FULL_ACCESS] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-global-readonly-super-users[GLOBAL_READONLY_SUPER_USERS] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-feature-restricted-users[FEATURE_RESTRICTED_USERS] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-restricted-users-whitelist[RESTRICTED_USERS_WHITELIST] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#clair-crda-configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +//// +[id="deprecated-features"] +=== Deprecated features +//// diff --git a/modules/rn_3_11_0.adoc b/modules/rn_3_11_0.adoc new file mode 100644 index 000000000..614b75ba9 --- /dev/null +++ b/modules/rn_3_11_0.adoc @@ -0,0 +1,448 @@ +:_content-type: CONCEPT +[id="release-notes-311"] += 
{productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-11-0"] +== RHBA-2024:1475 - {productname} 3.11.0 release + +Issued 2024-04-02 + +{productname} release {producty} is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:1475[RHBA-2024:1475] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations]. + +[id="release-cadence-311"] +== {productname} release cadence + +With the release of {productname} 3.10, the product has begun to align its release cadence and lifecycle with {ocp}. As a result, {productname} releases are now generally available (GA) within approximately four weeks of the most recent version of {ocp}. Customers can now expect the support lifecycle phases of {productname} to align with {ocp} releases. + +For more information, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="documentation-changes-311"] +== {productname} documentation changes + +The {productname} configuration tool has been deprecated since version 3.10. With this release, references and procedures that use the configuration tool have been, or will be, removed. These procedures will remain in older versions of {productname}. + +[id="new-features-and-enhancements-311"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. + +[id="aws-sts-support-quay-311"] +=== Support for AWS STS on {productname} + +Support for Amazon Web Services (AWS) Security Token Service (STS) is now offered for {productname}. AWS STS is a web service for requesting temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users and for users that you authenticate, or _federated users_. 
This feature is useful for clusters using Amazon S3 as an object storage, allowing {productname} to use STS protocols to authenticate with Amazon S3, which can enhance the overall security of the cluster and help to ensure that access to sensitive data is properly authenticated and authorized. This feature is also available for {ocp} deployments. + +For more information about configuring AWS STS for standalone {productname} deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}] + +[id="auto-pruning-enhancements"] +=== {productname} auto-pruning enhancements + +With the release of {productname} 3.10, a new auto-pruning feature was released. With that feature, {productname} administrators could set up auto-pruning policies on namespaces for both users and organizations. + +With this release, auto-pruning policies can now be set up on specified repositories. This feature allows for image tags to be automatically deleted within a repository based on specified criteria. Additionally, {productname} administrators can set auto-pruning policies on repositories that they have `admin` privileges for. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="v2-ui-enhancements-311"] +=== {productname} v2 UI enhancements + +In {productname} 3.8, a new UI was introduced as a technology preview feature. With {productname} 3.11, the following enhancements have been made to the v2 UI. + +[id="usage-logs-ui-v2"] +==== {productname} v2 UI usage logs + +{productname} 3.11 adds functionality for usage logs when using the v2 UI. Usage logs provide the following information about your {productname} deployment: + +* *Monitoring of team activities*. 
Allows administrators to view team activities, such as team creation, membership changes, and role assignments. +* *Auditing of tag history actions*. Allows security auditors to audit tag history actions, including tag creations, updates, and deletions. +* *Tracking of repository label changes*. Allows repository owners to track changes to labels, including additions, modifications, and removals. +* *Monitoring of expiration settings*. Allows engineers to monitor actions related to tag expiration settings, such as setting expiration dates or disabling expiration for specific tags. + +Logs can be exported to an email address or to a callback URL, and are available at the Organization, repository, and namespace levels. + +For more information, see https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-usage-logs-v2-ui[Viewing usage logs on the {productname} v2 UI]. + +[id="dark-mode-ui-v2"] +==== {productname} v2 UI dark mode + +{productname} 3.11 offers users the ability to switch between light and dark modes when using the v2 UI. This feature also includes an automatic mode selection, which chooses between light or dark modes depending on the user's browser preference. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#selecting-dark-mode-ui[Selecting color theme preference on the {productname} v2 UI]. + +[id="builds-support-v2-ui"] +==== Builds support on {productname} v2 UI + +{productname} Builds are now supported when using the v2 UI. This feature must be enabled prior to building container images by setting `FEATURE_BUILD_SUPPORT: true` in your `config.yaml` file. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#starting-a-build[Creating a new build]. 
+ +[id="auto-pruning-repositories-ui"] +==== Auto-pruning repositories v2 UI + +{productname} 3.11 offers users the ability to create auto-pruning policies using the v2 UI. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="team-synchronization-oidc"] +=== Team synchronization support via {productname} OIDC + +This release allows administrators to leverage an OpenID Connect (OIDC) identity provider to synchronize team, or group, settings, so long as their OIDC provider supports the retrieval of group information from the ID token or the `/userinfo` endpoint. Administrators can easily apply repository permissions to sets of users without having to manually create and sync group definitions between {productname} and the OIDC group, which is not scalable. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] + +[id="quay-operator-updates-311"] +== {productname} Operator updates + +The following updates have been made to the {productname} Operator: + +[id="configurable-resources-managed-components"] +=== Configurable resource requests for {productname-ocp} managed components + +With this release, users can manually adjust the resource requests on {productname-ocp} for the following components that have pods running: + +* `quay` +* `clair` +* `mirroring` +* `clairpostgres` +* `postgres` + +This feature allows users to run smaller test clusters, or to request more resources upfront in order to avoid partially degraded `Quay` pods. 
+ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] + +[id="aws-sts-support-quay-operator-311"] +=== Support for AWS STS on {productname-ocp} + +Support for Amazon Web Services (AWS) Security Token Service (STS) is now offered for {productname} deployments on {ocp}. AWS STS is a web service for requesting temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users and for users that you authenticate, or _federated users_. This feature is useful for clusters using Amazon S3 as an object storage, allowing {productname} to use STS protocols to authenticate with Amazon S3, which can enhance the overall security of the cluster and help to ensure that access to sensitive data is properly authenticated and authorized. + +For more information about AWS STS for {productname-ocp}, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] + +[id="new-quay-config-fields-311"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. + +[id="aws-s3-sts-configuration-fields"] +=== Configuration fields for AWS S3 STS deployments + +The following configuration fields have been added when configuring AWS STS for {productname}. These fields are used when configuring AWS S3 storage for your deployment. + +* *.sts_role_arn*. The unique Amazon Resource Name (ARN) required when configuring AWS STS for {productname}. +* *.sts_user_access_key*. The generated AWS S3 user access key required when configuring AWS STS for {productname}. +* *.sts_user_secret_key*. 
The generated AWS S3 user secret key required when configuring AWS STS for {productname}.
+
+For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-storage-aws-sts[AWS STS S3 storage].
+
+[id="team-sync-configuration-field"]
+=== Team synchronization configuration field
+
+The following configuration field has been added for the team synchronization via OIDC feature:
+
+* *PREFERRED_GROUP_CLAIM_NAME*: The key name within the OIDC token payload that holds information about the user's group memberships.
+
+[id="new-api-endpoints-311"]
+== New API endpoints
+
+The following API endpoints have been added to {productname} {producty}:
+
+[id="repository-auto-pruning-policy-endpoint"]
+=== Repository auto-pruning policy endpoints
+
+The repository auto-pruning policy feature introduces the following API endpoint:
+
+* `*/api/v1/repository///autoprunepolicy/`
++
+This API endpoint can be used with `POST`, `GET`, and `DELETE` calls to create, see, and delete auto-pruning policies on a repository for specific users in your organization. Note that you must have `admin` privileges on the repository that you are creating the policy for when using these commands.
+
+[id="known-issues-and-limitations-311"]
+== {productname} 3.11 known issues and limitations
+
+The following sections note known issues and limitations for {productname} {producty}.
+
+[id="oidc-team-sync-known-issues"]
+=== {productname} OIDC team synchronization known issues
+
+[id="unable-set-password-user-settings-page"]
+==== Unable to set user passwords via the *User Settings* page
+
+There is a known issue when {productname} uses OIDC as the authentication type with Microsoft Entra ID (previously Azure Active Directory).
+
+After logging in to {productname}, users are unable to set a password via the *User Settings* page. 
This is necessary for authentication when using Docker/Podman CLI to perform image push or pull operations to the registry. + +As a workaround, you can use Docker CLI and App Token as credentials when authenticating via OIDC. These tokens, alongside robot tokens, serve as an alternative to passwords and are considered the prescribed method for providing access to {productname} when authenticating via OIDC. + +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-6754[*PROJQUAY-6754*]. + +[id="team-sync-removal-known-issue"] +==== Unable to sync change when OIDC user is removed from OIDC + +Currently, when an OIDC user is removed from their OIDC provider, the user is not removed from the team on {productname}. They are still able to use the robot account token and app token to push and pull images from the registry. This is the expected behavior, however this behavior will be changed in a future version of {productname}. +(link:https://issues.redhat.com/browse/PROJQUAY-6842[*PROJQUAY-6842*]) + +[id="entra-id-team-sync-known-issue"] +==== Object ID must be used when OIDC provider is Microsoft Entra ID + +When using Microsoft Entra ID as your OIDC provider, {productname} administrators must input the *Object ID* of the OIDC group instead of the group name. The v2 UI does not currently alert users that Microsoft Entra ID users must input the Object ID of the OIDC group. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6917[*PROJQUAY-6917*]) + +[id="sts-s3-storage-known-issue"] +=== STS S3 storage known issue + +When using Amazon Web Services (AWS) Security Token Service (STS) with proxy storage enabled, users are unable to pull images and the following error is returned: `Error: copying system image from manifest list: parsing image configuration: fetching blob: received unexpected HTTP status: 502 Bad Gateway`. 
This is a known issue and will be fixed in a future version of {productname}.
+
+[id="upgrading-38-311-limitation"]
+=== Upgrading {productname-ocp} 3.8 directly to 3.11 limitation
+
+Upgrading {productname-ocp} from 3.8 to 3.11 does not work. Users must upgrade {productname-ocp} from 3.8 to 3.9 or 3.10, and then proceed with the upgrade to 3.11.
+
+For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_overview[Upgrade {productname}].
+
+[id="configurable-resource-limitation"]
+=== Configurable resource request limitation
+
+Attempting to set resource limitations for the `Quay` pod too low results in the pod being unable to boot up with the following statuses returned: `OOMKILLED` and `CrashLoopBackOff`. Resource limitations cannot be set lower than the minimum requirement, which can be found on the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] page.
+
+[id="v2-ui-known-issues-311"]
+=== {productname} v2 UI known issues
+
+The {productname} team is aware of the following known issues on the v2 UI:
+
+* link:https://issues.redhat.com/browse/PROJQUAY-6910[*PROJQUAY-6910*]. The new UI can't group and stack the chart on usage logs
+* link:https://issues.redhat.com/browse/PROJQUAY-6909[*PROJQUAY-6909*]. The new UI can't toggle the visibility of the chart on usage log
+* link:https://issues.redhat.com/browse/PROJQUAY-6904[*PROJQUAY-6904*]. "Permanently delete" tag should not be restored on new UI
+* link:https://issues.redhat.com/browse/PROJQUAY-6899[*PROJQUAY-6899*]. The normal user can not delete organization in new UI when enable FEATURE_SUPERUSERS_FULL_ACCESS
+* link:https://issues.redhat.com/browse/PROJQUAY-6892[*PROJQUAY-6892*]. 
The new UI should not invoke not required stripe and status page
+* link:https://issues.redhat.com/browse/PROJQUAY-6884[*PROJQUAY-6884*]. The new UI should show the tip of slack Webhook URL when creating slack notification
+* link:https://issues.redhat.com/browse/PROJQUAY-6882[*PROJQUAY-6882*]. The new UI global readonly super user can't see all organizations and image repos
+* link:https://issues.redhat.com/browse/PROJQUAY-6881[*PROJQUAY-6881*]. The new UI can't show all operation types in the logs chart
+* link:https://issues.redhat.com/browse/PROJQUAY-6861[*PROJQUAY-6861*]. The new UI "Last Modified" of organization always show N/A after target organization's setting is updated
+* link:https://issues.redhat.com/browse/PROJQUAY-6860[*PROJQUAY-6860*]. The new UI update the time machine configuration of organization show NULL in usage logs
+* link:https://issues.redhat.com/browse/PROJQUAY-6859[*PROJQUAY-6859*]. The new UI remove image repo permission show "undefined" for organization name in audit logs
+* link:https://issues.redhat.com/browse/PROJQUAY-6854[*PROJQUAY-6854*]. "Device-based theme" doesn't work as design in Firefox
+* link:https://issues.redhat.com/browse/PROJQUAY-6852[*PROJQUAY-6852*]. "Tag manifest with the branch or tag name" option in build trigger setup wizard should be checked by default.
+* link:https://issues.redhat.com/browse/PROJQUAY-6832[*PROJQUAY-6832*]. The new UI should validate the OIDC group name when enable OIDC Directory Sync
+* link:https://issues.redhat.com/browse/PROJQUAY-6831[*PROJQUAY-6831*]. The new UI should not show invited tab when the team is configured sync from OIDC group
+* link:https://issues.redhat.com/browse/PROJQUAY-6830[*PROJQUAY-6830*]. The new UI should show the sync icon when the team is configured sync team members from OIDC Group
+* link:https://issues.redhat.com/browse/PROJQUAY-6829[*PROJQUAY-6829*]. 
The new UI team member added to team sync from OIDC group should be audited in Organization logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6825[*PROJQUAY-6825*]. Build cancel operation log can not be displayed correctly in new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6812[*PROJQUAY-6812*]. The new UI the "performer by" is NULL of build image in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6810[*PROJQUAY-6810*]. The new UI should highlight the tag name with tag icon in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6808[*PROJQUAY-6808*]. The new UI can't click the robot account to show credentials in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6807[*PROJQUAY-6807*]. The new UI can't see the operations types in log page when quay is in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-6770[*PROJQUAY-6770*]. The new UI build image by uploading Docker file should support .tar.gz or .zip +* link:https://issues.redhat.com/browse/PROJQUAY-6769[*PROJQUAY-6769*]. The new UI should not display message "Trigger setup has already been completed" after build trigger setup completed +* link:https://issues.redhat.com/browse/PROJQUAY-6768[*PROJQUAY-6768*]. The new UI can't navigate back to current image repo from image build +* link:https://issues.redhat.com/browse/PROJQUAY-6767[*PROJQUAY-6767*]. The new UI can't download build logs +* link:https://issues.redhat.com/browse/PROJQUAY-6758[*PROJQUAY-6758*]. The new UI should display correct operation number when hover over different operation type +* link:https://issues.redhat.com/browse/PROJQUAY-6757[*PROJQUAY-6757*]. 
The new UI usage log should display the tag expiration time as date format
+
+[id="dark-mode-ui-v2-known-issues"]
+==== {productname} v2 UI dark mode known issue
+
+If you are using the automatic mode selection, which chooses between light or dark modes depending on the user's browser preference, your operating system appearance is overridden by the browser website appearance setting. If you find that the device-based theme is not working as expected, check your browser appearance setting. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6903[*PROJQUAY-6903*])
+
+////
+
+[id="tag-expiration-known-issue"]
+=== Tag expiration behavior
+
+The following behavior was observed when setting expiration time on a tag that is *older* than two years. This is not a known issue, but is instead the default behavior. This will be changed in a future version of {productname} due to the potential issues that could arise for some users.
+
+When setting expiration time on a tag that is *older* than two years, the tag is expired immediately and subsequently garbage collected. It does not end up in the time machine, and is deleted from the registry. This issue occurs because, by default, the `LABELED_EXPIRATION_MAXIMUM` parameter is set to `104w`, or two years.
+
+As a temporary workaround, you can increase the default value of the `LABELED_EXPIRATION_MAXIMUM` parameter in your `config.yaml` file. For example:
+
+[source,yaml]
+----
+# ...
+LABELED_EXPIRATION_MAXIMUM: 156w
+# ...
+----
+
+By increasing the value of this field to, for example, `156w` (three years), it is possible to set the expiration time for a tag that is up to 3 years old. For example, if a tag is created on March 25, 2021, the expiration date of the tag can be set up to March 24, 2024. 
The expiration date of the tag could not be set to later than this date, for example, July 2024, because it is over three years from when the tag was first created. + + +Additionally, {productname} administrators can add robot accounts to allowlists when disallowing the creation of new robot accounts. This ensures operability of approved robot accounts. + +* Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` without allowlisting supplementary robot accounts will break mirroring configurations. This will be fixed in a future version of {productname} + +You must allowlist robot accounts with the `ROBOTS_WHITELIST` variable when managing robot accounts with the `ROBOTS_DISALLOW` field. Use the following reference when managing robot accounts: ++ +[source,yaml] +---- +ROBOTS_DISALLOW: true +ROBOTS_WHITELIST: + - quayadmin+robot1 + - quayadmin+robot2 + - quayadmin+robot3 +---- ++ +For more information, see. . . +//// + +[id="notable-technical-changes"] +== Notable technical changes + +The following technical changes have been made to {productname} in 3.11. + +[id="removal-support-pgbouncer"] +=== Removal of support for PgBouncer + +{productname} 3.11 does not support PgBouncer. + +[id="power-z-linuxone-support-matrix-changes"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix changes + +Support has changed for some IBM Power, IBM Z, and IBM® LinuxONE features. For more information, see the "IBM Power, IBM Z, and IBM® LinuxONE support matrix" table. + +[id="bug-fixes-311"] +== {productname} bug fixes + +The following issues were fixed with {productname} 3.11: + +* link:https://issues.redhat.com/browse/PROJQUAY-6586[*PROJQUAY-6586*]. Big layer upload fails on Ceph/RADOS driver. +* link:https://issues.redhat.com/browse/PROJQUAY-6648[*PROJQUAY-6648*]. Application token Docker/Podman login command fails on windows. +* link:https://issues.redhat.com/browse/PROJQUAY-6673[*PROJQUAY-6673*]. 
Apply IGNORE_UNKNOWN_MEDIATYPE to child manifests in manifest lists. +* link:https://issues.redhat.com/browse/PROJQUAY-6619[*PROJQUAY-6619*]. Duplicate scrollbars in various UI screens. +* link:https://issues.redhat.com/browse/PROJQUAY-6235[*PROJQUAY-6235*]. mirror and readonly repositories should not be pruned. +* link:https://issues.redhat.com/browse/PROJQUAY-6243[*PROJQUAY-6243*]. Unable to edit repository description on Quay.io. +* link:https://issues.redhat.com/browse/PROJQUAY-5793[*PROJQUAY-5793*]. Next page button in tags view does not work correctly when the repo contains manifests and manifests lists. +* link:https://issues.redhat.com/browse/PROJQUAY-6442[*PROJQUAY-6442*]. new ui: Breadcrumb for teams page. +* link:https://issues.redhat.com/browse/PROJQUAY-6247[*PROJQUAY-6247*]. [New UI] Menu item naming convention doesn't follow "First Letter Capital" style. +* link:https://issues.redhat.com/browse/PROJQUAY-6261[*PROJQUAY-6261*]. Throw Robot Account exist error when entering existing robot account. +* link:https://issues.redhat.com/browse/PROJQUAY-6577[*PROJQUAY-6577*]. Quay operator does not render proper Clair config.yaml if customization is applied. +* link:https://issues.redhat.com/browse/PROJQUAY-6699[*PROJQUAY-6699*]. Broken links in Red hat Quay operator description. +* link:https://issues.redhat.com/browse/PROJQUAY-6841[*PROJQUAY-6841*]. Unable to upload dockerfile for build with 405. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. 
For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. + +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.11 | Quay 3.10 | Quay 3.9 + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] +|General Availability +|- +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}], link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} repository auto-pruning] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#configuring-dark-mode-ui[Configuring dark mode on the {productname} v2 UI] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] +|General Availability +|General Availability +|- + 
+|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} namespace auto-pruning] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#operator-georepl-site-removal[Single site geo-replication removal] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk log forwarding] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-nutanix[Nutanix Object Storage] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#clair-crda-configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment +|Supported +|Supported + +|Geo-Replication (Standalone) +|Not Supported +|Supported + +|Geo-Replication (Operator) +|Not Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator 
deployment +|Supported +|Supported + +|Mirror registry +|Not Supported +|Not Supported + +|PostgreSQL connection pooling via pgBouncer +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_12_0.adoc b/modules/rn_3_12_0.adoc new file mode 100644 index 000000000..3e2d47a31 --- /dev/null +++ b/modules/rn_3_12_0.adoc @@ -0,0 +1,535 @@ +:_content-type: CONCEPT +[id="release-notes-312"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-12-1"] +== RHBA-2024:5039 - {productname} 3.12.1 release + +Issued 2024-08-14 + +{productname} release 3.12.1 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:5039[RHBA-2024:5039] advisory. + +[id="new-features-312-1"] +=== {productname} 3.12.1 new features + +With this release, NetApp ONTAP S3 object storage is now supported. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-netapp-ontap[NetApp ONTAP S3 object storage]. + +[id="known-issues-312-1"] +=== {productname} 3.12.1 known issues + +When using NetApp ONTAP S3 object storage, images with large layer sizes fail to push. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-7462[*PROJQUAY-7462*]). + +[id="bug-fixes-312-1"] +=== {productname} 3.12.1 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-7177[PROJQUAY-7177]. 
Previously, global read-only superusers could not obtain resources from an organization when using the API. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7446[PROJQUAY-7446]. Previously, global read-only superusers could not obtain correct information when using the `listRepos` API endpoints. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7449[PROJQUAY-7449]. Previously, global read-only superusers could not use some `superuser` API endpoints. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7487[PROJQUAY-7487]. Previously, when a repository had multiple notifications enabled, the wrong type of event notification could be triggered. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7491[PROJQUAY-7491]. When using NetAPP's OnTAP S3 implementation, the follow errors could be returned: `presigned URL request computed using signature-version v2 is not supported by ONTAP-S3`. This error occurred because `boto` iterates over a map of authentications if none is requested, and returns `v2` because it is ordered earlier than `v4`. This issue has been fixed, and the error is no longer returned. + +* link:https://issues.redhat.com/browse/PROJQUAY-7578[PROJQUAY-7578]. On the 3.12.1 UI, the release notes pointed to {productname}'s 3.7 release. This has been fixed, and they now point to the current version. + + +[id="upgrade-312-1"] +=== Upgrading to {productname} 3.12.1 + +For information about upgrading standalone {productname} deployments, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html/upgrade_red_hat_quay/standalone-upgrade[Standalone upgrade]. + +For information about upgrading {productname-ocp}, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html/upgrade_red_hat_quay/operator-upgrade[Upgrading the {productname} Operator]. 
+
+[id="rn-3-12-0"]
+== RHBA-2024:4525 - {productname} 3.12.0 release
+
+Issued 2024-07-23
+
+{productname} release 3.12 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:4525[RHBA-2024:4525] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations].
+
+[id="release-cadence-312"]
+== {productname} release cadence
+
+With the release of {productname} 3.10, the product has begun to align its release cadence and lifecycle with {ocp}. As a result, {productname} releases are now generally available (GA) within approximately four weeks of the most recent version of {ocp}. Customers can now expect the support lifecycle phases of {productname} to align with {ocp} releases.
+
+For more information, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy].
+
+[id="documentation-changes-312"]
+== {productname} documentation changes
+
+The following documentation changes have been made with the {productname} {producty} release:
+
+* The link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index[Use {productname} guide] now includes accompanying API procedures for basic operations, such as creating and deleting repositories and organizations by using the API, access management, and so on.
+
+[id="new-features-and-enhancements-312"]
+== {productname} new features and enhancements
+
+The following updates have been made to {productname}.
+
+[id="splunk-event-collector-enhancements"]
+=== Splunk event collector enhancements
+
+With this update, {productname} administrators can configure their deployment to forward action logs directly to a Splunk HTTP Event Collector (HEC). This enhancement enables seamless integration with Splunk for comprehensive log management and analysis.
+
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Configuring action log storage for Splunk].
+
+[id="api-token-ownership"]
+=== API token ownership
+
+Previously, when a {productname} organization owner created an API OAuth token, and that API OAuth token was used by another organization member, the action was logged to the creator of the token. This was undesirable for auditing purposes, notably in restricted environments where only dedicated registry administrators are organization owners.
+
+With this release, organization administrators can now assign OAuth API tokens to be created by other users with specific permissions. This allows the audit logs to be reflected accurately when the token is used by a user that has no organization administrative permissions to create an OAuth API token.
+
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token].
+
+[id="image-expiration-event"]
+=== Image expiration notification
+
+Previously, {productname} administrators and users had no way of being alerted when an image was about to expire. With this update, an event can be configured to notify users when an image is about to expire. This helps {productname} users avoid unexpected pull failures.
+
+Image expiration event triggers can be configured to notify users through email, Slack, webhooks, and so on, and can be configured at the repository level. Triggers can be set for images expiring in any amount of days, and can work in conjunction with the auto-pruning feature.
+
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification].
+ +[id="auto-pruning-enhancements"] +=== {productname} auto-pruning enhancements + +With the release of {productname} 3.10, a new auto-pruning feature was released. With that feature, {productname} administrators could set up auto-pruning policies on namespaces for both users and organizations so that image tags were automatically deleted based on specified criteria. In {productname} 3.11, this feature was enhanced so that auto-pruning policies could be set up on specified repositories. + +With this release, default auto-pruning policies can now be set up at the registry level. Default auto-pruning policies set up at the registry level can be configured on new and existing organizations. This feature saves {productname} administrators time, effort, and storage by enforcing registry-wide rules. + +{productname} administrators must enable this feature by updating their `config.yaml` file to include the `DEFAULT_NAMESPACE_AUTOPRUNE_POLICY` configuration field and one of `number_of_tags` or `creation_date` methods. Currently, this feature cannot be enabled by using the v2 UI or the API. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="oci-compliance-updates"] +=== Open Container Initiative 1.1 implementation + +{productname} now supports the Open Container Initiative (OCI) 1.1 distribution spec version 1.1. Key highlights of this update include support for the following areas: + +* Enhanced capabilities for handling various types of artifacts, which provides better flexibility and compliance with OCI 1.1. +* Introduction of new reference types, which allows more descriptive referencing of artifacts. +* Introduction of the _referrers API_, which aids in the retrieval and management of referrers, which helps improve container image management. 
+* Enhanced UI to better visualize referrers, which makes it easier for users to track and manage dependencies.
+
+For more information about OCI spec 1.1, see link:https://github.com/opencontainers/distribution-spec/tree/v1.1.0-rc1?tab=readme-ov-file#oci-distribution-specification[OCI Distribution Specification].
+
+For more information about OCI support and {productname}, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/oci-intro[Open Container Initiative support].
+
+[id="metadata-support-annotations"]
+=== Metadata support through annotations
+
+Some OCI media types do not utilize labels and, as such, critical information such as expiration timestamps are not included. With this release, {productname} now supports metadata passed through annotations to accommodate OCI media types that do not include these labels for metadata transmission. Tools such as ORAS (OCI Registry as Storage) can now be used to embed information with artifact types to help ensure that images operate properly, for example, to expire.
+
+For more information about OCI media types and how adding an annotation with ORAS works, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/oci-intro[Open Container Initiative support].
+
+[id="v2-ui-enhancement"]
+=== {productname} v2 UI enhancements
+
+The following enhancements have been made to the {productname} v2 UI.
+
+[id="robot-account-creation-enhancement"]
+==== Robot account creation enhancement
+
+* When creating a robot account with the {productname} v2 UI, administrators can now specify that the Kubernetes runtime use a secret only for a specific organization or repository. This option can be selected by clicking the name of your robot account on the v2 UI, and then clicking the *Kubernetes* tab.
+ +[id="new-quay-config-fields-312"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. + +[id="oauth-reassign-configuration-field"] +=== OAuth access token reassignment configuration field + +The following configuration field has been added for reassigning OAuth access tokens: +|=== +| Field | Type | Description + +| *FEATURE_ASSIGN_OAUTH_TOKEN* | Boolean | Allows organization administrators to assign OAuth tokens to other users. +|=== + +.Example OAuth access token reassignment YAML +[source,yaml] +---- +# ... +FEATURE_ASSIGN_OAUTH_TOKEN: true +# ... +---- + +[id="notification-configuration-field"] +=== Notification interval configuration field + +The following configuration field has been added to enhance {productname} notifications: + +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* | Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. By default, this field is set to notify {productname} users of events happening every 5 hours. +|=== + +.Example notification re-run YAML +[source,yaml] +---- +# ... +NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES: 10 +# ... +---- + +[id="registry-auto-prune-configuration-fields"] +=== Registry auto-pruning configuration fields + +The following configuration fields have been added to {productname} auto-pruning feature: + +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* |Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. + + + +**Default:** `300` + +|*DEFAULT_NAMESPACE_AUTOPRUNE_POLICY* | Object | The default organization-wide auto-prune policy. + +|{nbsp}{nbsp}{nbsp} *.method: number_of_tags* | Object | The option specifying the number of tags to keep. 
+ +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *method: number_of_tags*, denotes the number of tags to keep. + + +For example, to keep two tags, specify `2`. + +|{nbsp}{nbsp}{nbsp} *.method: creation_date* | Object | The option specifying the duration of which to keep tags. +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *creation_date*, denotes how long to keep tags. + + +Can be set to seconds (`s`), days (`d`), months (`m`), weeks (`w`), or years (`y`). Must include a valid integer. For example, to keep tags for one year, specify `1y`. + +|*AUTO_PRUNING_DEFAULT_POLICY_POLL_PERIOD* |Integer | The period in which the auto-pruner worker runs at the registry level. By default, it is set to run one time per day (one time per 24 hours). Value must be in seconds. + +|=== + +.Example registry auto-prune policy by number of tags +[source,yaml] +---- +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: number_of_tags + value: 10 +---- + +.Example registry auto-prune policy by creation date +[source,yaml] +---- +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: creation_date + value: 1y +---- + +[id="image-vulnerability-notification-field"] +=== Vulnerability detection notification configuration field + +The following configuration field has been added to notify users on detected vulnerabilities based on security level: + +|=== +| Field | Type | Description +| *NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX* | String | Set minimal security level for new notifications on detected vulnerabilities. Avoids creation of large number of notifications after first index. If not defined, defaults to `High`. Available options include `Critical`, `High`, `Medium`, `Low`, `Negligible`, and `Unknown`. 
+|=== + +.Example image vulnerability notification YAML +[source,yaml] +---- +NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX: High +---- + +[id="oci-referrers-api-configuration-field"] +=== OCI referrers API configuration field + +The following configuration field allows users to list OCI referrers of a manifest under a repository by using the v2 API: + +|=== +| Field | Type | Description +| *FEATURE_REFERRERS_API* | Boolean | Enables OCI 1.1's referrers API. +|=== + +.Example OCI referrers enablement YAML +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: True +# ... +---- + +[id="disable-strict-logging-configuration-field"] +=== Disable strict logging configuration field + +The following configuration field has been added to address when external systems like Splunk or ElasticSearch are configured as audit log destinations but are intermittently unavailable. When set to `True`, the logging event is logged to the stdout instead. + +|=== +| Field | Type | Description +| *ALLOW_WITHOUT_STRICT_LOGGING* | Boolean | When set to `True`, if the external log system like Splunk or ElasticSearch is intermittently unavailable, allows users to push images normally. Events are logged to the stdout instead. Overrides `ALLOW_PULLS_WITHOUT_STRICT_LOGGING` if set. +|=== + +.Example strict logging YAML +[source,yaml] +---- +# ... +ALLOW_WITHOUT_STRICT_LOGGING: True +# ... +---- + +[id="clair-index-layer-size-configuration-field"] +=== Clair indexing layer size configuration field + +The following configuration field has been added for the Clair security scanner, which allows {productname} administrators to set a maximum layer size allowed for indexing. + +|=== +| Field | Type | Description +| *SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE* | String | The maximum layer size allowed for indexing. If the layer size exceeds the configured size, the {productname} UI returns the following message: `The manifest for this tag has layer(s) that are too large to index by the Quay Security Scanner`. 
The default is `8G`, and the maximum recommended is `10G`. + + + *Example*: `8G` +|=== + +[id="new-api-endpoints-312"] +== API endpoint enhancements + +[id="new-changeorgquota-createorgquota-endpoints"] +=== New changeOrganizationQuota and createOrganizationQuota endpoints: + +The following optional API field has been added to the `changeOrganizationQuota` and `createOrganizationQuota` endpoints: + +|=== +|Name|Description|Schema + +|**limits** + +_optional_|Human readable storage capacity of the organization. Accepts SI units like Mi, Gi, or Ti, as well as non-standard units like GB or MB. Must be mutually exclusive with `limit_bytes`.|string +|=== + +Use this field to set specific limits when creating or changing an organization's quota limit. For more information about these endpoints, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquota[changeOrganizationQuota] and link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquota[createOrganizationQuota]. + +[id="new-referrer-endpoints"] +=== New referrer API endpoint + +The following API endpoint allows users to obtain referrer artifact information: + +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**referrers** + +_required_| Looks up the OCI referrers of a manifest under a repository.|string +|**manifest_digest** + +_required_|The digest of the manifest|string +|=== + +To use this field, you must generate a v2 API OAuth token and set `FEATURE_REFERRERS_API: true` in your `config.yaml` file.
For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-v2-oauth-access-token[Creating an OCI referrers OAuth access token]. + +[id="known-issues-and-limitations-312"] +== {productname} 3.12 known issues and limitations + +The following sections note known issues and limitations for {productname} {producty}. + +[id="v2-ui-known-issues-312"] +=== {productname} v2 UI known issues + +The {productname} team is aware of the following known issues on the v2 UI: + +* link:https://issues.redhat.com/browse/PROJQUAY-6910[*PROJQUAY-6910*]. The new UI can't group and stack the chart on usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6909[*PROJQUAY-6909*]. The new UI can't toggle the visibility of the chart on usage log +* link:https://issues.redhat.com/browse/PROJQUAY-6904[*PROJQUAY-6904*]. "Permanently delete" tag should not be restored on new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6899[*PROJQUAY-6899*]. The normal user can not delete organization in new UI when enable FEATURE_SUPERUSERS_FULL_ACCESS +* link:https://issues.redhat.com/browse/PROJQUAY-6892[*PROJQUAY-6892*]. The new UI should not invoke not required stripe and status page +* link:https://issues.redhat.com/browse/PROJQUAY-6884[*PROJQUAY-6884*]. The new UI should show the tip of slack Webhook URL when creating slack notification +* link:https://issues.redhat.com/browse/PROJQUAY-6882[*PROJQUAY-6882*]. The new UI global readonly super user can't see all organizations and image repos +* link:https://issues.redhat.com/browse/PROJQUAY-6881[*PROJQUAY-6881*]. The new UI can't show all operation types in the logs chart +* link:https://issues.redhat.com/browse/PROJQUAY-6861[*PROJQUAY-6861*]. The new UI "Last Modified" of organization always show N/A after target organization's setting is updated +* link:https://issues.redhat.com/browse/PROJQUAY-6860[*PROJQUAY-6860*]. 
The new UI update the time machine configuration of organization show NULL in usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6859[*PROJQUAY-6859*]. The new UI remove image repo permission show "undefined" for organization name in audit logs +* link:https://issues.redhat.com/browse/PROJQUAY-6852[*PROJQUAY-6852*]. "Tag manifest with the branch or tag name" option in build trigger setup wizard should be checked by default. +* link:https://issues.redhat.com/browse/PROJQUAY-6832[*PROJQUAY-6832*]. The new UI should validate the OIDC group name when enable OIDC Directory Sync +* link:https://issues.redhat.com/browse/PROJQUAY-6830[*PROJQUAY-6830*]. The new UI should show the sync icon when the team is configured sync team members from OIDC Group +* link:https://issues.redhat.com/browse/PROJQUAY-6829[*PROJQUAY-6829*]. The new UI team member added to team sync from OIDC group should be audited in Organization logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6825[*PROJQUAY-6825*]. Build cancel operation log can not be displayed correctly in new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6812[*PROJQUAY-6812*]. The new UI the "performer by" is NULL of build image in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6810[*PROJQUAY-6810*]. The new UI should highlight the tag name with tag icon in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6808[*PROJQUAY-6808*]. The new UI can't click the robot account to show credentials in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6807[*PROJQUAY-6807*]. The new UI can't see the operations types in log page when quay is in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-6770[*PROJQUAY-6770*]. The new UI build image by uploading Docker file should support .tar.gz or .zip +* link:https://issues.redhat.com/browse/PROJQUAY-6769[*PROJQUAY-6769*].
The new UI should not display message "Trigger setup has already been completed" after build trigger setup completed +* link:https://issues.redhat.com/browse/PROJQUAY-6768[*PROJQUAY-6768*]. The new UI can't navigate back to current image repo from image build +* link:https://issues.redhat.com/browse/PROJQUAY-6767[*PROJQUAY-6767*]. The new UI can't download build logs +* link:https://issues.redhat.com/browse/PROJQUAY-6758[*PROJQUAY-6758*]. The new UI should display correct operation number when hover over different operation type +* link:https://issues.redhat.com/browse/PROJQUAY-6757[*PROJQUAY-6757*]. The new UI usage log should display the tag expiration time as date format + +[id="limitations-312"] +=== {productname} 3.12 limitations + +The following features are not supported on IBM Power (`ppc64le`) or IBM Z (`s390x`): + +* Ceph RadosGW storage +* Splunk HTTP Event Collector (HEC) + +[id="bug-fixes-312"] +== {productname} bug fixes + +The following issues were fixed with {productname} {producty}: + +* link:https://issues.redhat.com/browse/PROJQUAY-6763[*PROJQUAY-6763*]. Quay 3.11 new UI operations of enable/disable team sync from OIDC group should be audited +* link:https://issues.redhat.com/browse/PROJQUAY-6826[*PROJQUAY-6826*]. Log histogram can't be hidden in the new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6855[*PROJQUAY-6855*]. Quay 3.11 new UI no usage log to audit operations under user namespace +* link:https://issues.redhat.com/browse/PROJQUAY-6857[*PROJQUAY-6857*]. Quay 3.11 new UI usage log chart covered the operations types list +* link:https://issues.redhat.com/browse/PROJQUAY-6931[*PROJQUAY-6931*]. OCI-compliant pagination +* link:https://issues.redhat.com/browse/PROJQUAY-6972[*PROJQUAY-6972*]. Quay 3.11 new UI can't open repository page when Quay has 2k orgs and 2k image repositories +* link:https://issues.redhat.com/browse/PROJQUAY-7037[*PROJQUAY-7037*]. 
Can't get slack and email notification when package vulnerability found +* link:https://issues.redhat.com/browse/PROJQUAY-7069[*PROJQUAY-7069*]. Invalid time format error messages and layout glitches in tag expiration modal +* link:https://issues.redhat.com/browse/PROJQUAY-7107[*PROJQUAY-7107*]. Quay.io overview page does not work in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-7239[*PROJQUAY-7239*]. Quay logging exception when caching specific `security_reports` +* link:https://issues.redhat.com/browse/PROJQUAY-7304[*PROJQUAY-7304*]. security: Add Vary header to 404 responses +* link:https://issues.redhat.com/browse/PROJQUAY-6973[*PROJQUAY-6973*]. Add OCI Pagination +* link:https://issues.redhat.com/browse/PROJQUAY-6974[*PROJQUAY-6974*]. Set a default auto-pruning policy at the registry level +* link:https://issues.redhat.com/browse/PROJQUAY-6976[*PROJQUAY-6976*]. Org owner can change ownership of API tokens +* link:https://issues.redhat.com/browse/PROJQUAY-6977[*PROJQUAY-6977*]. Trigger event on image expiration +* link:https://issues.redhat.com/browse/PROJQUAY-6979[*PROJQUAY-6979*]. Annotation Parsing +* link:https://issues.redhat.com/browse/PROJQUAY-6980[*PROJQUAY-6980*]. Add support for a global read only superuser +* link:https://issues.redhat.com/browse/PROJQUAY-7360[*PROJQUAY-7360*]. Missing index on subject_backfilled field in manifest table +* link:https://issues.redhat.com/browse/PROJQUAY-7393[*PROJQUAY-7393*]. Create backfill index concurrently +* link:https://issues.redhat.com/browse/PROJQUAY-7116[*PROJQUAY-7116*]. Allow to ignore audit logging failures + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. 
Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. + +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.12 | Quay 3.11 | Quay 3.10 + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk HTTP Event Collector (HEC)] support +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#oci-intro[Open Container Initiative 1.1 support] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] +|General Availability +|General Availability +|- + 
+|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}], link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} repository auto-pruning] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#configuring-dark-mode-ui[Configuring dark mode on the {productname} v2 UI] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} namespace auto-pruning] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment 
+|Supported +|Supported + +|Clair Disconnected +|Supported +|Supported + +|Geo-Replication (Standalone) +|Supported +|Supported + +|Geo-Replication (Operator) +|Not Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator deployment +|Supported +|Supported + +|Mirror registry +|Not Supported +|Not Supported + +|PostgreSQL connection pooling via pgBouncer +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface +|Supported +|Supported + +|Quay Disconnected +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_13_0.adoc b/modules/rn_3_13_0.adoc new file mode 100644 index 000000000..4e76e258d --- /dev/null +++ b/modules/rn_3_13_0.adoc @@ -0,0 +1,506 @@ +:_content-type: CONCEPT +[id="release-notes-313"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-13-4"] +== RHBA-2025:1079 - {productname} 3.13.4 release + +Issued 2025-02-20 + +{productname} release 3.13.4 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2025:1079[RHBA-2025:1079] advisory. + +[id="rn-3-13-3"] +== RHBA-2025:0301 - {productname} 3.13.3 release + +Issued 2025-01-20 + +{productname} release 3.13.3 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2025:0301[RHBA-2025:0301] advisory. + +[id="bug-fixes-313-3"] +=== {productname} 3.13.3 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-[PROJQUAY-8336]. 
Previously, when using {productname} with managed Quay and Clair PostgreSQL databases, Red Hat Advanced Cluster Security would scan all running `Quay` pods and report `High Image Vulnerability in Quay PostgreSQL database and Clair PostgreSQL database`. This issue has been resolved. + +[id="rn-3-13-2"] +== RHBA-2024:10967 - {productname} 3.13.2 release + +Issued 2024-12-17 + +{productname} release 3.13.2 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:10967[RHBA-2024:10967] advisory. + +[id="enhancements-313-2"] +=== {productname} 3.13.2 new features + +With this release, a pull-through cache organization can now be created when using the {productname} v2 UI. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#red-hat-quay-proxy-cache-procedure[Using {productname} to proxy a remote registry]. + +[id="known-issue-313-2"] +=== {productname} 3.13.2 known issues + +When using the pull-through proxy feature in {productname} with quota management enabled, and the organization quota fills up, it is expected that {productname} removes the least recently used image to free up space for new cached entries. However, images pulled by digest are not evicted automatically when the quota is exceeded, which causes subsequent pull attempts to return a `Quota has been exceeded on namespace` error. + +As a temporary workaround, you can run a bash shell inside of the {productname} database pod to make digest-pulled images visible for eviction with the following setting: `update tag set hidden = 0;`. For more information, see link:https://issues.redhat.com/browse/PROJQUAY-8071[PROJQUAY-8071]. + +[id="bug-fixes-313-2"] +=== {productname} 3.13.2 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-8273[PROJQUAY-8273], link:https://issues.redhat.com/browse/PROJQUAY-6474[PROJQUAY-6474].
When deploying {productname} with a custom `HorizontalPodAutoscaler` component and then setting the component to `managed: false` in the `QuayRegistry` custom resource definition (CRD), the {productname} Operator continuously terminates and resets the `minReplicas` value to 2 for `mirror` and `clair` components. To work around this issue, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-unmanaged-hpa[Using unmanaged Horizontal Pod Autoscalers]. + +* link:https://issues.redhat.com/browse/PROJQUAY-8208[PROJQUAY-8208]. Previously, {productname} would return a `501` error on repository or organization creation when the authorization type was set to OIDC and restricted users were set. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-8269[PROJQUAY-8269]. Previously, on the {productname} UI, the OAuth scopes page suggested that scopes could be applied to robot accounts. This was not the case. Wording on the OAuth scopes page of the UI has been fixed. + +[id="rn-3-13-1"] +== RHBA-2024:9478 - {productname} 3.13.1 release + +Issued 2024-11-18 + +{productname} release 3.13.1 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:9478[RHBA-2024:9478] advisory. + +[id="information-upgrading-3-13-1"] +== Information about upgrading to 3.13.1 + +Previously, when attempting to upgrade to {productname} 3.13, if FIPS mode was enabled for your {ocp} cluster with Clair enabled, Clair would not function in your cluster. This issue was resolved in version 3.13.1. Upgrading to {productname} 3.13 automatically upgrades users to version 3.13.1 so that this issue is avoided. Additionally, if you are upgrading from 3.13 to 3.13.1 and FIPS was enabled, upgrading to 3.13.1 resolves the issue.
(link:https://issues.redhat.com/browse/PROJQUAY-8185[*PROJQUAY-8185*]) + +[id="enhancements-313-1"] +=== {productname} 3.13.1 enhancements + +With the release of {productname} 3.13.1, Hitachi Content Platform (HCP) is now supported for use as a storage backend. This allows organizations to leverage HCP for scalable, secure, and reliable object storage within their {productname} registry deployments. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-hcp[HCP Object Storage]. + +[id="known-issues-313-1"] +=== {productname} 3.13.1 known issues + +When using Hitachi Content Platform for your object storage, attempting to push an image with a large layer to a {productname} registry results in the following error: + +[source,text] +---- +An error occurred (NoSuchUpload) when calling the CompleteMultipartUpload operation: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. +---- + +This is a known issue and will be fixed in a future version of {productname}. + +[id="bug-fixes-313-1"] +=== {productname} 3.13.1 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-8185[PROJQUAY-8185]. Previously, when attempting to upgrade {productname-ocp} to 3.13 with FIPS mode enabled, the upgrade would fail for deploying using Clair. This issue has been resolved. Upgrading to 3.13.1 does not fail for {productname-ocp} using Clair with FIPS mode enabled. + +* link:https://issues.redhat.com/browse/PROJQUAY-8024[PROJQUAY-8024]. Previously, using Hitachi HCP v9.7 as your storage provider would return errors when attempting to pull images. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-5086[PROJQUAY-5086].
Previously, {productname-ocp} would produce information about horizontal pod autoscalers (HPAs) for some components (for example, `Clair`, `Redis`, `PostgreSQL`, and `ObjectStorage`) when they were unmanaged by the Operator. This issue has been resolved and information about HPAs is no longer reported for unmanaged components. + +[id="rn-3-13-0"] +== RHBA-2024:8408 - {productname} 3.13.0 release + +Issued 2024-10-30 + +{productname} release 3.13 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:8408[RHBA-2024:8408] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations]. For information about the release cadence of {productname}, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="documentation-changes-313"] +== {productname} documentation changes + +The following documentation changes have been made with the {productname} {producty} release: + +* The {productname} _Builders_ feature that was originally documented in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index[Using {productname} guide] has been moved into a new, dedicated book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/index[Builders and image automation]". + +* The {productname} _Builders_ feature that was originally documented in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#red-hat-quay-builders-enhancement[{productname} Operator features] has been moved into a new, dedicated book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/index[Builders and image automation]".
+ +* A new book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index[Securing {productname}]" has been created. This book covers SSL and TLS for {productname}, and adding additional certificate authorities (CAs) to your deployment. More content will be added to this book in the future. + +* A new book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/managing_access_and_permissions/index[Managing access and permissions]" has been created. This book covers topics related to access controls, repository visibility, and robot accounts by using the UI and the API. More content will be added to this book in the future. + +[id="upgrading-quay-313"] +== Upgrading to {productname} 3.13 + +With {productname} 3.13, the `volumeSize` parameter has been implemented for use with the `clairpostgres` component of the `QuayRegistry` custom resource definition (CRD). This replaces the `volumeSize` parameter that was previously used for the `clair` component of the same CRD. + +If your {productname} 3.12 `QuayRegistry` custom resource definition (CRD) implemented a volume override for the `clair` component, you must ensure that the `volumeSize` field is included under the `clairpostgres` component of the `QuayRegistry` CRD. + +[IMPORTANT] +==== +Failure to move `volumeSize` from the `clair` component to the `clairpostgres` component will result in a failed upgrade to version 3.13. +==== + +For example: + +[source,yaml] +---- +spec: + components: + - kind: clair + managed: true + - kind: clairpostgres + managed: true + overrides: + volumeSize: +---- + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/upgrade_red_hat_quay/index[Upgrade {productname}]. + +[id="new-features-and-enhancements-313"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. 
+ +[id="auto-pruning-enhancements"] +=== {productname} auto-pruning enhancements + +With the release of {productname} 3.10, a new auto-pruning feature was released. With that feature, {productname} administrators could set up auto-pruning policies on namespaces for both users and organizations so that image tags were automatically deleted based on specified criteria. In {productname} 3.11, this feature was enhanced so that auto-pruning policies could be set up on specified repositories. + +With {productname} 3.12, default auto-pruning policies could be set up at the registry level on new and existing configurations, which saved {productname} administrators time, effort, and storage by enforcing registry-wide rules. + +With the release of {productname} {producty}, the following enhancements have been made to the auto-pruning feature. + +[id="tag-specification-patterns"] +==== Tag specification patterns in auto-pruning policies + +Previously, the {productname} auto-pruning feature could not target or exclude specific image tags. With the release of {productname} {producty}, it is now possible to specify a _regular expression_, or _regex_, to match a subset of tags for both organization- and repository-level auto-pruning policies. This allows {productname} administrators more granular auto-pruning policies to target only certain image tags for removal. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#autopruning-regular-expressions[Using regular expressions with auto-pruning]. + +[id="multiple-auto-pruning-policies"] +==== Multiple auto-pruning policies + +Previously, {productname} only supported a single auto-pruning policy per organization and repository. With the release of {productname} {producty}, multiple auto-pruning policies can now be applied to an organization or a repository.
These auto-pruning policies can be based on different tag naming (regex) patterns to cater for the different life cycles of images in the same repository or organization. This feature provides more flexibility when automating the image life cycle in your repository. + +Additional auto-pruning policies can be added on the {productname} v2 UI by clicking *Add Policy* on the *Auto-Pruning Policies* page. They can also be added by using the API. + +For more information about setting auto-prune policies, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="keyless-auth-robot-accounts"] +=== Keyless authentication with robot accounts + +In previous versions of {productname}, robot account tokens were valid for the lifetime of the token unless deleted or regenerated. Tokens that do not expire have security implications for users who do not want to store long-term passwords or manage the deletion or regeneration of authentication tokens. + +With {productname} {producty}, {productname} administrators are provided the ability to exchange {productname} robot account tokens for an external OIDC token. This allows robot accounts to leverage short-lived, or _ephemeral tokens_, that last one hour. Ephemeral tokens are refreshed regularly and can be used to authenticate individual transactions. + +This feature greatly enhances the security of your {productname} registry by mitigating the possibility of robot token exposure by removing the tokens after one hour. + +For more information, see https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts]. + +[id="quay-operator-updates-313"] +== {productname-ocp} new features and enhancements + +The following updates have been made to {productname-ocp}.
+ +[id="certificate-based-auth-quay-postgresql"] +=== Support for certificate-based authentication between {productname} and PostgreSQL + +With this release, support for certificate-based authentication between {productname} and PostgreSQL has been added. This allows {productname} administrators to supply their own SSL/TLS certificates that can be used for client-side authentication with PostgreSQL or CloudSQL. This provides enhanced security and allows for easier automation for your {productname} registry. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#cert-based-auth-quay-sql[Certificate-based authentication between {productname} and SQL]. + +[id="v2-ui-enhancement"] +=== {productname} v2 UI enhancements + +The following enhancements have been made to the {productname} v2 UI. + +[id="robot-federation-v2-ui-enhancement"] +==== Robot federation selection + +A new configuration page, *Set robot federation*, has been added to the {productname} v2 UI. This can be found by navigating to your organization or repository's robot account, clicking the menu kebab, and then clicking *Set robot federation*. This page is used when configuring keyless authentication with robot accounts, and allows you to add multiple OIDC providers to a single robot account. + +For more information, see https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts]. + +[id="new-quay-config-fields-313"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. 
+ +[id="disable-pushes-configuration-field"] +=== Disabling pushes to the {productname} registry configuration field + +In some cases, a read-only option for {productname} is not possible since it requires inserting a service key and other manual configuration changes. With the release of {productname} 3.13, a new configuration field has been added: `DISABLE_PUSHES`. + +When `DISABLE_PUSHES` is set to `true`, users are unable to push images or image tags to the registry when using the CLI. Most other registry operations continue as normal when this feature is enabled by using the {productname} UI. For example, changing tags, editing a repository, robot account creation and deletion, user creation, and so on are all possible by using the UI. + +When `DISABLE_PUSHES` is set to `true`, the {productname} garbage collector is disabled. As a result, when `PERMANENTLY_DELETE_TAGS` is enabled, using the {productname} UI to permanently delete a tag does not result in the immediate deletion of a tag. Instead, the tag stays in the repository until `DISABLE_PUSHES` is set to `false`, which re-enables the garbage collector. {productname} administrators should be aware of this caveat when using `DISABLE_PUSHES` and `PERMANENTLY_DELETE_TAGS` together. + +This field might be useful in some situations such as when {productname} administrators want to calculate their registry's quota and disable image pushing until after calculation has completed. With this method, administrators can avoid putting putting the whole registry in `read-only` mode, which affects the database, so that most operations can still be done. + +|=== +| Field | Type | Description + +|*DISABLE_PUSHES* |Boolean | Disables pushes of new content to the registry while retaining all other functionality. Differs from `read-only` mode because database is not set as `read-only`. Defaults to `false`. +|=== + +.Example DISABLE_PUSHES configuration field +[source,yaml] +---- +# ... +DISABLE_PUSHES: true +# ... 
+---- + +[id="new-api-endpoints-313"] +== API endpoint enhancements + +[id="new-auto-prune-policy-endpoints"] +=== New autoPrunePolicy endpoints + +`tagPattern` and `tagPatternMatches` API parameters have been added to the following API endpoints: + +* `createOrganizationAutoPrunePolicy` +* `updateOrganizationAutoPrunePolicy` +* `createRepositoryAutoPrunePolicy` +* `updateRepositoryAutoPrunePolicy` +* `createUserAutoPrunePolicy` +* `updateUserAutoPrunePolicy` + +These fields enhance the auto-pruning feature by allowing {productname} administrators more control over what images are pruned. The following table provides descriptions of these fields: + +|=== +|Name|Description|Schema + +|**tagPattern** + +_optional_|Tags only matching this pattern (regex) will be pruned. |string + +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern. |boolean +|=== + +For example API commands, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="federated-robot-token-api-endpoints"] +=== New federated robot token API endpoints + +The following API endpoints have been added for the keyless authentication with robot accounts feature: + +* `GET oauth2/federation/robot/token`. Use this API endpoint to return an expiring robot token using the robot identity federation mechanism. + +* `POST /api/v1/organization/{orgname}/robots/{robot_shortname}/federation`. Use this API endpoint to create a federation configuration for the specified organization robot. + +[id="notable-technical-changes-313"] +== {productname} 3.13 notable technical changes + +Clair now requires its PostgreSQL database to be version 15. For standalone {productname} deployments, administrators must manually migrate their database over from PostgreSQL version 13 to version 15. 
For more information about this procedure, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrading-clair-postgresql-database[Upgrading the Clair PostgreSQL database]. + +For {productname-ocp} deployments, this update is automatically handled by the Operator so long as your Clair PostgreSQL database is currently using version 13. + +[id="known-issues-and-limitations-313"] +== {productname} 3.13 known issues and limitations + +The following sections note known issues and limitations for {productname} {producty}. + +[id="clair-suse-enterprise-known-issue"] +=== Clair vulnerability report known issue + +When pushing Suse Enterprise Linux Images with *HIGH* image vulnerabilities, Clair 4.8.0 does not report these vulnerabilities. This is a known issue and will be fixed in a future version of {productname}. + +[id="fips-mode-known-issue"] +=== FIPS mode known issue + +If FIPS mode is enabled for your {ocp} cluster and you use Clair, you must not upgrade the {productname} Operator to version {producty}. If you upgrade, Clair will not function in your cluster. (link:https://issues.redhat.com/browse/PROJQUAY-8185[*PROJQUAY-8185*]) + +[id="registry-auto-pruning-known-issue"] +=== Registry auto-pruning known issues + +The following known issues apply to the auto-pruning feature. + +[id="policy-prioritization-known-issue"] +==== Policy prioritization known issue + +Currently, the auto-pruning feature prioritizes the following order when configured: + +. Method: `creation_date` + `organization wide` +. Method: `creation_date` + `repository wide` +. Method: `number_of_tags` + `organization wide` +. Method: `number_of_tags` + `repository wide` + +This means that the auto-pruner first prioritizes, for example, an organization-wide policy set to expire tags by their creation date before it prunes images by the number of tags that it has. + +There is a known issue when configuring a registry-wide auto-pruning policy. 
If {productname} administrators configure a `number_of_tags` policy before a `creation_date` policy, it is possible to prune more than the intended set for the `number_of_tags` policy. This might lead to situations where a repository removes certain image tags unexpectedly. + +This is not an issue for organization or repository-wide auto-prune policies. This known issue only exists at the registry level. It will be fixed in a future version of {productname}. + +[id="unrecognizable-auto-prune-tag-patterns"] +==== Unrecognizable auto-prune tag patterns + +When creating an auto-prune policy, the pruner cannot recognize `\b` and `\B` patterns. This is a common behavior with regular expression patterns, wherein `\b` and `\B` match empty strings. {productname} administrators should avoid using _regex_ patterns that use `\B` and `\b` to avoid this issue. (link:https://issues.redhat.com/browse/PROJQUAY-8089[*PROJQUAY-8089*]) + +[id="v2-ui-known-issues-313"] +=== {productname} v2 UI known issues + +The {productname} team is aware of the following known issues on the v2 UI: + +* link:https://issues.redhat.com/browse/PROJQUAY-6910[*PROJQUAY-6910*]. The new UI can't group and stack the chart on usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6909[*PROJQUAY-6909*]. The new UI can't toggle the visibility of the chart on usage log +* link:https://issues.redhat.com/browse/PROJQUAY-6904[*PROJQUAY-6904*]. "Permanently delete" tag should not be restored on new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6899[*PROJQUAY-6899*]. The normal user can not delete organization in new UI when enable FEATURE_SUPERUSERS_FULL_ACCESS +* link:https://issues.redhat.com/browse/PROJQUAY-6892[*PROJQUAY-6892*]. The new UI should not invoke not required stripe and status page +* link:https://issues.redhat.com/browse/PROJQUAY-6884[*PROJQUAY-6884*]. 
The new UI should show the tip of slack Webhook URL when creating slack notification +* link:https://issues.redhat.com/browse/PROJQUAY-6882[*PROJQUAY-6882*]. The new UI global readonly super user can't see all organizations and image repos +* link:https://issues.redhat.com/browse/PROJQUAY-6881[*PROJQUAY-6881*]. The new UI can't show all operation types in the logs chart +* link:https://issues.redhat.com/browse/PROJQUAY-6861[*PROJQUAY-6861*]. The new UI "Last Modified" of organization always show N/A after target organization's setting is updated +* link:https://issues.redhat.com/browse/PROJQUAY-6860[*PROJQUAY-6860*]. The new UI update the time machine configuration of organization show NULL in usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6859[*PROJQUAY-6859*]. The new UI remove image repo permission show "undefined" for organization name in audit logs +* link:https://issues.redhat.com/browse/PROJQUAY-6852[*PROJQUAY-6852*]. "Tag manifest with the branch or tag name" option in build trigger setup wizard should be checked by default. +* link:https://issues.redhat.com/browse/PROJQUAY-6832[*PROJQUAY-6832*]. The new UI should validate the OIDC group name when enable OIDC Directory Sync +* link:https://issues.redhat.com/browse/PROJQUAY-6830[*PROJQUAY-6830*]. The new UI should show the sync icon when the team is configured sync team members from OIDC Group +* link:https://issues.redhat.com/browse/PROJQUAY-6829[*PROJQUAY-6829*]. The new UI team member added to team sync from OIDC group should be audited in Organization logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6825[*PROJQUAY-6825*]. Build cancel operation log can not be displayed correctly in new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6812[*PROJQUAY-6812*]. The new UI the "performer by" is NULL of build image in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6810[*PROJQUAY-6810*]. 
The new UI should highlight the tag name with tag icon in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6808[*PROJQUAY-6808*]. The new UI can't click the robot account to show credentials in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6807[*PROJQUAY-6807*]. The new UI can't see the operations types in log page when quay is in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-6770[*PROJQUAY-6770*]. The new UI build image by uploading Docker file should support .tar.gz or .zip +* link:https://issues.redhat.com/browse/PROJQUAY-6769[*PROJQUAY-6769*]. The new UI should not display message "Trigger setup has already been completed" after build trigger setup completed +* link:https://issues.redhat.com/browse/PROJQUAY-6768[*PROJQUAY-6768*]. The new UI can't navigate back to current image repo from image build +* link:https://issues.redhat.com/browse/PROJQUAY-6767[*PROJQUAY-6767*]. The new UI can't download build logs +* link:https://issues.redhat.com/browse/PROJQUAY-6758[*PROJQUAY-6758*]. The new UI should display correct operation number when hover over different operation type +* link:https://issues.redhat.com/browse/PROJQUAY-6757[*PROJQUAY-6757*]. The new UI usage log should display the tag expiration time as date format + +[id="bug-fixes-313"] +== {productname} bug fixes + +The following issues were fixed with {productname} {producty}: + +* link:https://issues.redhat.com/browse/PROJQUAY-5681[*PROJQUAY-5681*]. Previously, when configuring an image repository with *Events and Notifications* to receive a Slack notification for *Push to Repository* and *Package Vulnerability Found*, no notification was returned of *new critical image vulnerability found*. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-7244[*PROJQUAY-7244*]. Previously, it was not possible to filter for repositories under specific organizations. 
This issue has been resolved, and you can now filter for repositories under specific organizations. +* link:https://issues.redhat.com/browse/PROJQUAY-7388[*PROJQUAY-7388*]. Previously, when {productname} was configured with OIDC authentication using Microsoft Azure Entra ID and team sync was enabled, removing the team sync resulted in the usage logs chart displaying *Undefined*. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-7430[*PROJQUAY-7430*]. Some public container image registries, for example, Google Cloud Registry, generate longer passwords for the login. When this happens, {productname} could not mirror images from those registries because the password length exceeded the maximum allowed in the {productname} database. ++ +The actual length limit imposed by the encryption mechanism is lower than `9000`. This implies that while the database can hold up to `9000` characters, the effective limit during encryption is actually `6000`, and can be calculated as follows: {Max Password Length} = {field\_max\_length} - {_RESERVED\_FIELD\_SPACE}. A password length of `6000` ensures compatibility with AWS ECR and most registries. + +* link:https://issues.redhat.com/browse/PROJQUAY-7599[*PROJQUAY-7599*]. Previously, attempting to delete a manifest using a tag name and the {productname} v2 API resulted in a 405 error code. This was because there was no `delete_manifest_by_tagname` operation in the API. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-7606[*PROJQUAY-7606*]. Users can now create a new team using dashes (`-`) via the v2 UI. Previously, this could only be done using the API. +* link:https://issues.redhat.com/browse/PROJQUAY-7686[*PROJQUAY-7686*]. Previously, the vulnerability page showed vertical scroll bars when provided URLs in the advisories were too big, which caused difficulties in reading information from the page. This issue has been resolved. 
+* link:https://issues.redhat.com/browse/PROJQUAY-7982[*PROJQUAY-7982*]. There was a bug in the console service when using {quayio} for the first time. When attempting to create a user correlated with the console's user, clicking *Confirm username* refreshed the page and opened the same modal. This issue has been resolved. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. 
+ +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.13 | Quay 3.12 | Quay 3.11 + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#cert-based-auth-quay-sql[Certificate-based authentication between {productname} and SQL] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk HTTP Event Collector (HEC)] support +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#oci-intro[Open Container Initiative 1.1 support] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] +|General 
Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}], link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} repository auto-pruning] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment +|Supported +|Supported + +|Clair Disconnected +|Supported +|Supported + +|Geo-Replication (Standalone) +|Supported +|Supported + +|Geo-Replication (Operator) +|Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator deployment +|Supported +|Supported + +|Mirror registry +|Supported +|Supported + +|PostgreSQL connection pooling via pgBouncer +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface 
+|Supported +|Supported + +|Quay Disconnected +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_14_0.adoc b/modules/rn_3_14_0.adoc new file mode 100644 index 000000000..e5a9c552e --- /dev/null +++ b/modules/rn_3_14_0.adoc @@ -0,0 +1,287 @@ +:_content-type: CONCEPT +[id="release-notes-314"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-14-0"] +== RHBA-2024:8408 - {productname} 3.14.0 release + +Issued 2025-04-02 + +{productname} release {producty} is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2025:2467[RHBA-2025:2467] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations]. For information the release cadence of {productname}, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="documentation-changes-314"] +== {productname} documentation changes + +The following documentation changes have been made with the {productname} 3.14 release: + +* The {productname} API guide has been updated and split into two books: + +** link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_guide/index[{productname} API guide]. This book contains an overview of the {productname} API, an overview of token types (OAuth 2.0 access tokens, robot account tokens, and OCI referrers OAuth access tokens), how to enable and use the {productname} API, suggestions for token management, and example commands for leveraging API endpoints to execute commands. This book is useful if you are new to the {productname} API or want information about its token types and how to leverage the API. 
+ +** link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API reference]. This book contains all API reference endpoints and accompanying example commands for those endpoints. This book is useful if you are already familiar with using the {productname} API. + +[id="new-features-and-enhancements-314"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. + +[id="clair-enhancements"] +=== Clair enhancements + +With this release, Clair indexer data is now included with downstream builds. This allows {productname} administrators to more easily reference indexers in the `clair-config.yaml` file when running Clair in an air-gapped or disconnected environment. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#clair-disconnected-environments[Clair in disconnected environments]. + +[id="model-card-rendering"] +=== Model card rendering on the v2 UI + +With the release of {productname} 3.14, the v2 UI now renders model card information for machine learning models that include a model card. When a manifest has a certain annotation (for example, `application/x-mlmodel`) and a model card stored as a layer in the manifest, a *Model Card* tab is displayed on the tag's information page. The information on the *Model Card* page provides users with comprehensive insights into each model, and can help enhance a user's understanding of models stored within their registry. + +[NOTE] +==== +The *Model Card* rendering page is only available on the {productname} v2 UI. +==== + +To view model card information, {productname} users or administrators must push an artifact to a repository. The artifact must have an accompanying model card. This information renders under *Repository* -> ** -> *Model Card*. 
+ +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[Viewing model card information by using the v2 UI]. + +[id="tag-expiration-enhancement"] +=== Tag expiration enhancement + +Previously, when configuring tag expiration for {productname}, the `yearly` option was unavailable on the {productname} v2 UI. With this update, users can now configure default tag expiration to occur yearly on the {productname} v2 UI. This can be set by using the {productname} UI or in your `config.yaml` file. For example: + +[source,yaml] +---- +DEFAULT_TAG_EXPIRATION: 1y +TAG_EXPIRATION_OPTIONS: + - 1y +---- + +[id="new-quay-config-fields-314"] +== {productname} configuration fields updates and changes + +The following configuration fields have been added to {productname} 3.14. + +[id="model-card-rendering-configuration-field"] +=== Model card rendering configuration fields + +The following configuration fields have been added for the model card rendering feature on the {productname} v2 UI: + +|=== +| Field | Type | Description + +|*FEATURE_UI_MODELCARD* |Boolean | Enables *Modelcard* image tab in UI. Defaults to `true`. +|*UI_MODELCARD_ARTIFACT_TYPE* | String | Defines the modelcard artifact type. +|*UI_MODELCARD_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. +|*UI_MODELCARD_LAYER_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. 
+|=== + +These configuration fields are enabled and set by default in your `config.yaml` file: + +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel +UI_MODELCARD_ANNOTATION: + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: + org.opencontainers.image.title: README.md +---- + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[Viewing model card information by using the v2 UI]. + +[id="ignore-unknown-mediatype-removal"] +=== IGNORE_UNKNOWN_MEDIATYPES configuration field removal + +The `IGNORE_UNKNOWN_MEDIATYPES` configuration field has been removed. By default, {productname} accepts all artifact types. + +[id="new-quay-footer-fields"] +=== New {productname} footer fields + +The following configuration fields have been added to the original (v1) UI. You can use these fields to customize the footer of your on-prem v1 UI. + +[NOTE] +==== +These fields are currently unavailable on the {productname} v2 UI. +==== + +|=== +| Field | Type | Description + +|*FOOTER_LINKS* |Object | Enable customization of footer links in {productname}'s UI for on-prem installations. + +|*.TERMS_OF_SERVICE_URL* | String | Custom terms of service for on-prem installations. + + + +**Example:** + +`https://index.hr` + +|*.PRIVACY_POLICY_URL* | String | Custom privacy policy for on-prem installations. + + + +**Example:** + +`https://index.hr` +|*.SECURITY_URL* | String | Custom security page for on-prem installations. + + + +**Example:** + +`https://index.hr` + +| **.ABOUT_URL** | String | Custom about page for on-prem installations. 
+ + + +**Example:** + +`https://index.hr` +|=== + +.Example footer links YAML +[source,yaml] +---- +FOOTER_LINKS: + "TERMS_OF_SERVICE_URL": "https://www.index.hr" + "PRIVACY_POLICY_URL": "https://www.example.hr" + "SECURITY_URL": "https://www.example.hr" + "ABOUT_URL": "https://www.example.hr" +---- + +[id="new-api-endpoints-314"] +== API endpoint enhancements + +No new API endpoints were added in {productname} 3.14. + +[id="known-issues-and-limitations-314"] +== {productname} {producty} known issues and limitations + +The following sections note known issues and limitations for {productname} 3.14. + +[id="unsupported-image-types-stuck"] +=== Unsupported image types stuck in querying status + +When pushing an unsupported image type, for example, an AI model, to a {productname} registry, the *Security Report* and *Packages* pages on the UI fail to load. This occurs because these image types are stuck in a `Querying` status and, as a result, the pages of these tabs are left blank. This is a known issue and will be fixed in a future version of {productname}. + +[id="bug-fixes-314"] +== {productname} bug fixes + +The following issues were fixed with {productname} 3.14: + +* link:https://issues.redhat.com/browse/PROJQUAY-8532[*PROJQUAY-8532*]. Previously, there was an issue when updating Clair when deployed with Amazon Web Services (AWS) Relational Database Service (RDS) from version 12.19 to 15.7. After upgrading, scanning new images would result in images being stuck in a `Queued` state and be unable to produce a vulnerability report. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-8131[*PROJQUAY-8131*]. Previously, users could receive an unknown exception when trying to serialize manifest type for caching on a referrer's endpoint. This resulted in the following error: `Object of type Manifest is not JSON serializable`. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-8272[*PROJQUAY-8272*]. 
Previously, nested indexes, or indexes referring to another index, were broken in {productname}. This could result in the following response when pushing to a registry: `Error response from registry: recognizable error message not found: PUT "https://quay.io/v2/arewm/oci-spec-1217/manifests/nested-index": response status code 500: Internal Server Error`. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-8559[*PROJQUAY-8559*]. Previously, a password field in NGINX logs was not obfuscated. This issue has been resolved, and the `repeatPassword` value is hidden. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. + +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.14 | Quay 3.13 | Quay 3.12 + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[Viewing model card information by using the v2 UI]. 
+|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#cert-based-auth-quay-sql[Certificate-based authentication between {productname} and SQL] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk HTTP Event Collector (HEC)] support +|General Availability +|General Availability +|General Availability + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#oci-intro[Open Container Initiative 1.1 support] +|General Availability +|General Availability +|General Availability + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token] +|General Availability +|General Availability +|General Availability + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via 
OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment +|Supported +|Supported + +|Clair Disconnected +|Supported +|Supported + +|Geo-Replication (Standalone) +|Supported +|Supported + +|Geo-Replication (Operator) +|Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator deployment +|Supported +|Supported + +|Mirror registry +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface +|Supported +|Supported + +|Quay Disconnected +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_80.adoc b/modules/rn_3_80.adoc index d9632c116..0aa37fd61 100644 --- a/modules/rn_3_80.adoc +++ b/modules/rn_3_80.adoc @@ -228,9 +228,9 @@ There is currently no workaround for this issue, and it will be addressed in a f + For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4588[PROJQUAY-4588]. -* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} UI v2. When this field is set, all superuser actions on tenant content should be audited. Currently, when a superuser deletes an existing organization that is owned by a normal user, there is no way to audit that operation. This will be fixed in a future version of {productname}. +* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} v2 UI. When this field is set, all superuser actions on tenant content should be audited. Currently, when a superuser deletes an existing organization that is owned by a normal user, there is no way to audit that operation. This will be fixed in a future version of {productname}. 
-* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} UI v2. When setting this field to `true` in your config.yaml file, {productname} superusers can view organizations created by normal users, but cannot see the image repository. As a temporary workaround, superusers can view those repositories by navigating to them from the *Organizations* page. This will be fixed in a future version of {productname}. +* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} v2 UI. When setting this field to `true` in your config.yaml file, {productname} superusers can view organizations created by normal users, but cannot see the image repository. As a temporary workaround, superusers can view those repositories by navigating to them from the *Organizations* page. This will be fixed in a future version of {productname}. * When setting the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field to `true`, superusers do not have permission to create a new image repository under a normal user's organization. This is a known issue and will be fixed in a future version of {productname}. @@ -312,8 +312,6 @@ For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4433[PR + For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470]. -* Currently, OpenShift Data Foundations (ODF) is unsupported when {productname} is deployed on IPv6 single stack environments. As a result, ODF cannot be used in IPv6 environments. This limitation is scheduled to be fixed in a future version of OpenShift Data Foundations. - * Currently, dual-stack (IPv4 and IPv6) support does not work on {productname} {ocp} deployments. When {productname} 3.8 is deployed on {ocp} with dual-stack support enabled, the Quay route generated by the {productname} Operator only generates an IPv4 address, and not an IPv6 address. 
As a result, clients with an IPv6 address cannot access the {productname} application on {ocp}. This limitation will be lifted upon the release of {ocp} 4.12. * Currently, Github and link:api.github.com[api.github.com] do not support IPv6. When {productname} is deployed on {ocp} with IPv6 enabled, the config editor cannot be configured to use Github authentication. diff --git a/modules/rn_3_90.adoc b/modules/rn_3_90.adoc new file mode 100644 index 000000000..e88861d64 --- /dev/null +++ b/modules/rn_3_90.adoc @@ -0,0 +1,396 @@ +:_content-type: CONCEPT +[id="rn-3-902"] += RHBA-2023:5345 - {productname} 3.9.2 release + +Issued 2023-09-26 + +{productname} release 3.9.2 is now available. + +As of September 25, 2023, the Code Ready Dependency Analytics (CRDA) service for Java vulnerability matching is no longer usable with Clair. The service's API moved to a different endpoint and there are no plans to update Clair to support this new endpoint. Instead, users should upgrade to {productname} {producty} in order to keep getting CVE reports on Java Maven packages indexed by Clair from container images stored in {productname}, with the additional benefit of offline support and without the need for separate API keys. + +The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:5345[RHBA-2023:5345] advisory. + +[id="bug-fixes-392"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-5174[PROJQUAY-5174]. Quay Operator doesn't trust internal service CA when it is rotated. +* link:https://issues.redhat.com/browse/PROJQUAY-5931[PROJQUAY-5931]. Duplicate Robot accounts +* link:https://issues.redhat.com/browse/PROJQUAY-5256[PROJQUAY-5256]. Storage replication not triggered on manifest list mirror + +[id="rn-3-901"] += RHBA-2023:4974 - {productname} 3.9.1 release + +Issued 2023-09-05 + +{productname} release 3.9.1 is now available with Clair 4.7.1. 
The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:4974[RHBA-2023:4974] advisory. + +[id="bug-fixes-391"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-5581[PROJQUAY-5581]. Should show total quota consumption for user account namespace in UI. +* link:https://issues.redhat.com/browse/PROJQUAY-5691[PROJQUAY-5691]. CVE-2023-33733 python-reportlab: remote code execution via supplying a crafted PDF file [quay-3.9]. +* link:https://issues.redhat.com/browse/PROJQUAY-5702[PROJQUAY-5702]. CVE-2023-36464 quay-registry-container: pypdf: Possible Infinite Loop when a comment isn't followed by a character [quay-3]. +* link:https://issues.redhat.com/browse/PROJQUAY-5874[PROJQUAY-5874]. CVE-2021-33194 Vulnerabilities in dependency usr/local/bin/pushgateway (gobinary). +* link:https://issues.redhat.com/browse/PROJQUAY-5925[PROJQUAY-5925]. A lot of quotatotalworker error in quayregistry-quay-config-editor pod log. +* link:https://issues.redhat.com/browse/PROJQUAY-5914[PROJQUAY-5914]. Bulk update Repo settings in Robot accounts tab. +* link:https://issues.redhat.com/browse/PROJQUAY-5967[PROJQUAY-5967]. Quay 3.9.1 High Image Vulnerability reported by Redhat ACS. + +[id="rn-3-900"] += RHBA-2023:3256 - {productname} 3.9.0 release + +Issued 2023-08-14 + +{productname} release 3.9.0 is now available with Clair 4.7. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:3256[RHBA-2023:3256] advisory. + +[id="release-cadence-310"] +== {productname} release cadence + +With the next release of {productname}, version 3.10, the product will begin to align its release cadence and lifecycle with {ocp}. As a result, {productname} 3.10 will be generally available within approximately four weeks of the {ocp} 4.14 release, which is currently scheduled for release in early Q4, 2024. 
+ +With the current release model, the total support length of {productname} 3.8 and {productname} 3.9 would have been cut short due to the release of {productname} 3.10 being scheduled earlier than previous releases. In order to provide customers with proper time to prepare for updates, the full support and maintenance phases of {productname} 3.8 and {productname} 3.9 have been amended to go beyond the release of {productname} 3.10. This is a one time amendment. After the release of {productname} 3.10 and subsequent releases, customers can expect the support lifecycle phases of {productname} to align with {ocp} releases. + +For more information, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="new-features-and-enhancements-39"] +== {productname} new features and enhancements + +The following updates have been made to {productname}: + +[id="rn-clair-47-enhancements"] +=== Clair 4.7 + +Clair 4.7 was released as part of {productname} 3.9. + +As of September 25, 2023, the Code Ready Dependency Analytics (CRDA) service for Java vulnerability matching will no longer be usable with Clair. The service's API moved to a different endpoint and there are no plans to update Clair to support this new endpoint. Instead, users should upgrade to {productname} {producty} in order to keep getting CVE reports on Java Maven packages indexed by Clair from container images stored in {productname}, with the additional benefit of offline support and without the need for separate API keys. + +Additional enhancements to Clair include the following: + +* Native support for indexing Golang modules and RubeGems in container images. +* Change to link:OSV.dev[OSV.dev] as the vulnerability database source for any programming language package managers. +** This includes popular sources like GitHub Security Advisories or PyPA. +** This allows offline capability. +* Use of pyup.io for Python and CRDA for Java is suspended. 
+* Clair now supports Java, Golang, Python, and Ruby dependencies. + +[id="single-site-georepl-removal"] +=== Removal of a single site in a geo-replicated environment + +{productname} administrators can now remove a specific site from their geo-replicated environment. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#operator-georepl-site-removal[Removing a geo-replicated site from your {productname} Operator deployment]. + +[id="quota-management-enhancements"] +=== Quota management enhancements + +* Prior to {productname} 3.9, the quota management feature created totals by combining the manifest sizes at the repository and namespace level. This created an issue wherein a single blob could be counted multiple times within the total. For example, in previous versions of {productname}, if blobs were referenced multiple times within a repository and namespace, the blob was counted towards the allotted quota for every time it was referenced. ++ +With this release, individual blob sizes are summed at the repository and namespace level. For example, if two tags in the same repository reference the same blob, the size of that blob is now only counted once towards the repository total. This enhancement to the quota management feature works by calculating the size of existing repositories and namespace with a backfill worker, and then adding or subtracting from the total for every image that is pushed or garbage collected afterwords. Additionally, the subtraction from the total happens when the manifest is garbage collected, whereas in the past it occurred when the tag was deleted. ++ +[NOTE] +==== +Because subtraction occurs from the total when the manifest is garbage collected, there is a delay in the size calculation until it is able to be garbage collected. 
For more information about {productname} garbage collection, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#garbage-collection[{productname} garbage collection]. +==== ++ +Additionally, manifest list totals are now counted toward the repository total, the total quota consumed when upgrading from a previous version of {productname} might be reportedly differently in {productname} 3.9. In some cases, the new total might go over a repository's previously-set limit. {productname} administrators might have to adjust the allotted quota of a repository to account for these changes. ++ +Collectively, the quota management feature in {productname} 3.9 provides a more accurate depiction of storage growth and registry consumption. As a result, users can place quota limits on the namespace and repository sizes based on the actual usage of storage by {productname}. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index?lb_target=stage&check_logged_in=1#red-hat-quay-quota-management-39[Quota management for {productname} 3.9] + +[id="configuring-action-log-storage-splunk"] +=== Configuring action log storage for Splunk + +With this release, {productname} administrators can forward logs to a Splunk deployment. This allows administrators to perform log analyses and offload the internal database. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Configuring action log storage for Splunk]. + +[id="quay-ui-v2-enhancements"] +=== {productname} v2 UI enhancements + +In {productname} 3.8, a new UI was introduced as a technology preview. With {productname} 3.9, the following enhancements have been made to the v2 UI: + +* A tab for robot account creation. +* A tab for Organization settings. +* A tab for image tags. 
+* A tab for Repository settings. +* Overview, Security Reports, and Package vulnerability reports. + +For more information about v2 UI enablement, see link:/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index?#using-v2-ui[Using the {productname} v2 UI]. + +[id="nutanix-object-storage"] +=== Nutanix Object Storage + +With this release, Nutanix Object Storage is now supported. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-nutanix[Nutanix Object Storage]. + + +[id="new-quay-config-fields"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} 3.9: + +* The following configuration fields have been added to the quota management feature: + +** **QUOTA_BACKFILL**: Enables the quota backfill worker to calculate the size of pre-existing blobs. Because this parameter sums the de-duplicated totals in the database, it might increase database load. ++ +*Default*: `True` + +** **QUOTA_TOTAL_DELAY_SECONDS**:The time delay for starting the quota backfill. Rolling deployments can cause incorrect totals. This field *must* be set to a time longer than it takes for the rolling deployment to complete. ++ +**Default**: `1800` + +** **PERMANENTLY_DELETE_TAGS**: Enables functionality related to the removal of tags from the time machine window. ++ +**Default**: `False` + +** **RESET_CHILD_MANIFEST_EXPIRATION**: Resets the expirations of temporary tags targeting the child manifests. With this feature set to `True`, child manifests are immediately garbage collected. ++ +**Default**: `False` + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-updates-39[Configuration updates for {productname} 3.9]. 
+ +* The following configuration field has been added to enhance the {productname} security scanner feature: + +** **FEATURE_SECURITY_SCANNER_NOTIFY_ON_NEW_INDEX**: Whether to allow sending notifications about vulnerabilities for new pushes. ++ +**Default**: `True` ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-scanner[Security scanner configuration fields]. + +* The following configuration field has been added to configure whether {productname} automatically removes old persistent volume claims (PVCs) when upgrading from version 3.8 -> 3.9: + +** **POSTGRES_UPGRADE_DELETE_BACKUP**: When set to `True`, removes old persistent volume claims (PVCs) after upgrading. ++ +**Default**: `False` + +* The following configuration field has been added to track various events: + +** **ACTION_LOG_AUDIT_LOGINS**: When set to `True`, tracks advanced events such as logging into, and out of, the UI, and logging in using Docker for regular users, robot accounts, and for application-specific token accounts. ++ +**Default**: `True` + +[id="quay-operator-updates"] +== {productname} Operator + +The following updates have been made to the {productname} Operator: + +* Currently, the {productname} Operator and Clair use PostgreSQL 10. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. ++ +With this release, if your database is managed by the {productname} Operator, updating from {productname} 3.8 -> 3.9 automatically handles upgrading PostgreSQL 10 to PostgreSQL 13. ++ +[IMPORTANT] +==== +Users with a managed database will be required to upgrade their PostgreSQL database from 10 -> 13. +==== ++ +If you do not want the {productname} Operator to upgrade your PostgreSQL deployment from 10 -> 13, you must set the PostgreSQL parameter to `managed: false` in your `quayregistry.yaml` file. 
For more information about setting your database to unmanaged, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-unmanaged-postgres[Using an existing Postgres database]. ++ +[IMPORTANT] +==== +* It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. +==== ++ +If you want your PostgreSQL database to match the same version as your {rhel} system, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/deploying_different_types_of_servers/using-databases#migrating-to-a-rhel-8-version-of-postgresql_using-postgresql[Migrating to a RHEL 8 version of PostgreSQL] for {rhel-short} 8 or link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_using_database_servers/using-postgresql_configuring-and-using-database-servers#migrating-to-a-rhel-9-version-of-postgresql_using-postgresql[Migrating to a RHEL 9 version of PostgreSQL] for {rhel-short} 9. + +For more information about the {productname} 3.8 -> 3.9 procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/upgrade_red_hat_quay/index#operator-upgrade[Upgrading the {productname} Operator overview]. + + +[id="known-issues-and-limitations-39"] +== {productname} 3.9 known issues and limitations + +The following sections note known issues and limitations for {productname} 3.9. 
+ +[id="known-issues-39"] +=== Known issues: + +[id="upgrading-known-issues"] +==== Upgrading known issues + +There are two known issues when upgrading your {productname} deployment: + +* If your {productname} deployment is upgrading from one y-stream to the next, for example, from 3.8.10 -> 3.8.11, you must not switch the upgrade channel from `stable-3.8` to `stable-3.9`. Changing the upgrade channel in the middle of a y-stream upgrade will disallow {productname} from upgrading to 3.9. This is a known issue and will be fixed in a future version of {productname}. + +* When upgrading from {productname} 3.7 to 3.9, you might receive the following error: `pg_dumpall: error: query failed: ERROR: xlog flush request 1/B446CCD8 is not satisfied --- flushed only to 1/B0013858`. As a workaround to this issue, you can delete the `quayregistry-clair-postgres-upgrade` job on your {ocp} deployment, which should resolve the issue. + +[id="other-known-issues"] +==== Other known issues + +* Using `conftest pull` commands to obtain policies might return the following error: `Error: download policies: client get: stat /policy/quayregistry-quay-quay-enterprise-847.apps.quaytest-847.qe.devcluster.openshift.com/conftest/policy:latest: no such file or directory`. As a workaround, you can add the `oci://` prefix on your registry host. For example: ++ +[source,terminal] +---- +$ conftest pull oci://mkoktest.quaydev.org/admin/conftest:v1 +---- ++ +This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-5573[*PROJQUAY-5573*]) + +* {productname} 3.9 introduced changes to the quota management feature. One of these changes is that tags in the time machine window now count towards the quota total of your organization. 
++ +There is a known issue when the proxy cache feature is enabled and configured in a new organization with a link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#quota-management-arch[hard quota check] and time machine settings set to longer than *a few seconds* under their organization settings. In sum, tags in a proxy organization are all given a tag expiration that defaults to 1 day. If your proxy organization has a time machine policy set to longer than *a few seconds* under your organization settings, and the tag expires, it is not immediately available for garbage collection; it must wait to be outside of the time machine window before it can be garbage collected. Because subtraction happens upon garbage collection, and pruned tags are kept within the time frame allotted by your organization's settings, image tags are not immediately garbage collected. This results in the quota consumption metric not being updated, and runs the risk of your proxy organization going over the allotted quota. ++ +When a hard quota check is configured for a proxy organization, {productname} administrators will want to reclaim the space taken by tags within the time machine window to prevent organizations from hitting their allotted quota. As a temporary workaround, you can set the time machine expiration for proxy organizations to *a few seconds* under *Organizations* -> *Settings* on the {productname} UI. This immediately removes image tags and allows for more accurate quota consumption metrics. ++ +This is a non-issue for proxy organizations employing a soft quota check and can be ignored. + +* When removing a site from your geo-replicated {productname} deployment, you might receive the following error when running `python -m util.removelocation`: `/app/lib/python3.9/site-packages/tzlocal/unix.py:141: SyntaxWarning: "is not" with a literal. Did you mean "!="? 
while start is not 0: /app/lib/python3.9/site-packages/netaddr/strategy/{}init{}.py:189: SyntaxWarning: "is not" with a literal. Did you mean "!="? if word_sep is not ''`. You can confirm the deletion of your site by entering `y`. The error is a known issue and will be removed in a future version of {productname}. + +[id="limitations-39"] +=== {productname} 3.9 limitations + +* You must use the Splunk UI to view {productname} action logs. At this time, viewing Splunk action logs on the {productname} *Usage Logs* page is unsupported, and returns the following message: `Method not implemented. Splunk does not support log lookups`. + +[id="bug-fixes-39"] +== {productname} bug fixes + +* Previously, on {productname} Lightweight Directory Access Protocol (LDAP) deployments, there was a bug that disallowed referrals from being used with team synchronization and in other circumstances. With this update, referrals can be turned off globally for {productname} to ensure proper behavior across all components. + +* Previously, only last access timestamps were recorded in {productname}. This issue has been fixed, and now the following timestamps are recorded: ++ +** Login to the {productname} UI. +** Logout of the {productname} UI. +** Login via Docker CLI (registry API) for regular users. +** Login via Docker CLI (Registry API) for robot accounts. +** Login via Docker CLI (Registry API) for app-specific tokens accounts. ++ +You can disable this timestamp feature by setting `ACTION_LOG_AUDIT_LOGINS` to `false` in your `config.yaml` file. This field is set to `true` by default. ++ +[NOTE] +==== +Logout events from the client side (Docker or Podman) are not causing requests to the registry API and are therefore not trackable. +==== + +* link:https://issues.redhat.com/browse/PROJQUAY-4614[PROJQUAY-4614]. Add conftest mediatypes to default Quay configuration. +* link:https://issues.redhat.com/browse/PROJQUAY-4865[PROJQUAY-4865]. Remove unused dependencies. 
+* link:https://issues.redhat.com/browse/PROJQUAY-4957[PROJQUAY-4957]. Limit indexing of manifests that continuously fail. +* link:https://issues.redhat.com/browse/PROJQUAY-5009[PROJQUAY-5009]. secscan: add api client timeout. +* link:https://issues.redhat.com/browse/PROJQUAY-5018[PROJQUAY-5018]. Ignore unknown media types in manifests. +* link:https://issues.redhat.com/browse/PROJQUAY-5237[PROJQUAY-5237]. The number of repositories in organization is incorrect in new UI. +* link:https://issues.redhat.com/browse/PROJQUAY-4993[PROJQUAY-4993]. Support Action Log Forward to Splunk. +* link:https://issues.redhat.com/browse/PROJQUAY-4567[PROJQUAY-4567]. Robot Tokens. +* link:https://issues.redhat.com/browse/PROJQUAY-5289[PROJQUAY-5289]. Create a new username for accounts that login via SSO in the new UI. +* link:https://issues.redhat.com/browse/PROJQUAY-5362[PROJQUAY-5362]. API: Add filtering to Tags API. +* link:https://issues.redhat.com/browse/PROJQUAY-5207[PROJQUAY-5207]. Phase 3: Quay.io Summit Deliverables. +* link:https://issues.redhat.com/browse/PROJQUAY-4608[PROJQUAY-4608]. Quay Operator should install a fully supported version of Postgres for Quay and Clair. +* link:https://issues.redhat.com/browse/PROJQUAY-5050[PROJQUAY-5050]. Can't provide a link to quay directly to an image that works in both old UI and new UI. +* link:https://issues.redhat.com/browse/PROJQUAY-5253[PROJQUAY-5253]. Don't convert dashes to underscores during first login. +* link:https://issues.redhat.com/browse/PROJQUAY-4303[PROJQUAY-4303]. Multi-arch images are ignored in storage consumption calculation. +* link:https://issues.redhat.com/browse/PROJQUAY-4304[PROJQUAY-4304]. Empty repositories are reporting storage consumption. +* link:https://issues.redhat.com/browse/PROJQUAY-5634[PROJQUAY-5634]. oci: Allow optional components in the image config to be set to "null". +* link:https://issues.redhat.com/browse/PROJQUAY-5639[PROJQUAY-5639]. 
Quay 3.9.0 delete organization under normal user by superuser was failed with unauthorized error. +* link:https://issues.redhat.com/browse/PROJQUAY-5642[PROJQUAY-5642]. Quay 3.9.0 image High Vulnerability reported by Redhat ACS. +* link:https://issues.redhat.com/browse/PROJQUAY-5630[PROJQUAY-5630]. Quay 3.9.0 Quay image High vulnerability issue CVE-2022-28948. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries older than the latest three releases. 
+ +.Technology Preview tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.9 | Quay 3.8 | Quay 3.7 + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#operator-georepl-site-removal[Single site geo-replication removal] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk log forwarding] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-nutanix[Nutanix Object Storage] +|General Availability +|- +|- + +|Docker v1 support +|Deprecated +|Deprecated +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +| - + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#proc_manage-ipv6-dual-stack[FEATURE_LISTEN_IP_VERSION] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-super-users-enabling[LDAP_SUPERUSER_FILTER] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-restricted-users-enabling[LDAP_RESTRICTED_USER_FILTER] +|General Availability +|General Availability +| - + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-superusers-full-access[FEATURE_SUPERUSERS_FULL_ACCESS] +|General Availability +|General Availability +|- + +| 
link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-global-readonly-super-users[GLOBAL_READONLY_SUPER_USERS] +|General Availability +|General Availability +| - + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-feature-restricted-users[FEATURE_RESTRICTED_USERS] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-restricted-users-whitelist[RESTRICTED_USERS_WHITELIST] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com//documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[Quota management and enforcement] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-builders-enhancement[{productname} build enhancements] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries] +|General Availability +|General Availability +|Technology Preview + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index[Geo-replication - {productname} Operator] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#unmanaged_clair_configuration[Advanced Clair configuration] +|General Availability +|General Availability +|General Availability + +|Support for Microsoft Azure Government (MAG) +|General Availability +|General 
Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#clair-crda-configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +//// +[id="deprecated-features"] +=== Deprecated features +//// diff --git a/modules/robot-account-manage-api.adoc b/modules/robot-account-manage-api.adoc new file mode 100644 index 000000000..1546017d1 --- /dev/null +++ b/modules/robot-account-manage-api.adoc @@ -0,0 +1,4 @@ +[id="robot-account-manage-api"] += Creating and configuring robot accounts by using the {productname} API + +Robot accounts can be created, retrieved, changed, and deleted for both organizations and users by using the {productname} API. \ No newline at end of file diff --git a/modules/robot-account-overview.adoc b/modules/robot-account-overview.adoc new file mode 100644 index 000000000..dbd3c02bf --- /dev/null +++ b/modules/robot-account-overview.adoc @@ -0,0 +1,44 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="allow-robot-access-user-repo"] += {productname} Robot Account overview + +Robot Accounts are used to set up automated access to the repositories in +your +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +ifeval::["{context}" == "quay-security"] +{productname}. +endif::[] +registry. They are similar to {ocp} service accounts. + +Setting up a Robot Account results in the following: + +* Credentials are generated that are associated with the Robot Account. + +* Repositories and images that the Robot Account can push and pull images from are identified. + +* Generated credentials can be copied and pasted to use with different container clients, such as Docker, Podman, Kubernetes, Mesos, and so on, to access each defined repository. 
+ +ifeval::["{context}" == "quay-security"] +Robot Accounts can help secure your {productname} registry by offering various security advantages, such as the following: + +* Specifying repository access. +* Granular permissions, such as `Read` (pull) or `Write` (push) access. They can also be equipped with `Admin` permissions if warranted. +* Designed for CI/CD pipelines, system integrations, and other automation tasks, helping avoid credential exposure in scripts, pipelines, or other environment variables. +* Robot Accounts use tokens instead of passwords, which provides the ability for an administrator to revoke the token in the event that it is compromised. + +endif::[] + +Each Robot Account is limited to a single user namespace or Organization. For example, the Robot Account could provide access to all repositories for the user `quayadmin`. However, it cannot provide access to repositories that are not in the user's list of repositories. + +Robot Accounts can be created using the {productname} UI, or through the CLI using the {productname} API. After creation, {productname} administrators can leverage more advanced features with Robot Accounts, such as keyless authentication. \ No newline at end of file diff --git a/modules/robot-account-permissions-api.adoc b/modules/robot-account-permissions-api.adoc new file mode 100644 index 000000000..71ccf8f1c --- /dev/null +++ b/modules/robot-account-permissions-api.adoc @@ -0,0 +1,76 @@ +:_content-type: CONCEPT +[id="robot-account-permissions-api"] += Obtaining robot account information by using the {productname} API + +Robot account information, such as permissions, can be obtained for both organizations and users by using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. 
+* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorgrobot[`GET /api/v1/organization/{orgname}/robots/{robot_shortname}`] API endpoint to return information for a robot for an organization: ++ +[source,terminal] +---- +curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test+example", "created": "Mon, 25 Nov 2024 16:25:16 -0000", "last_accessed": null, "description": "", "token": "BILZ6YTVAZAKOGMD9270OKN3SOD9KPB7OLKEJQOJE38NBBRUJTIH7T5859DJL31Q", "unstructured_metadata": {}} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorgrobotpermissions[`GET /api/v1/organization/{orgname}/robots/{robot_shortname}/permissions`] endpoint to return the list of permissions for a specific organization robot: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots//permissions" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"repository": {"name": "testrepo", "is_public": true}, "role": "admin"}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserrobot[`GET /api/v1/user/robots/{robot_shortname}`] API endpoint to return the user's robot with the specified name: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/user/robots/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "quayadmin+mirror_robot", "created": "Wed, 15 Jan 2025 17:22:09 -0000", "last_accessed": null, "description": "", "token": 
"QBFYWIWZOS1I0P0R9N1JRNP1UZAOPUIR3EB4ASPZKK9IA1SFC12LTEF7OJHB05Z8", "unstructured_metadata": {}} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserrobotpermissions[`GET /api/v1/user/robots/{robot_shortname}/permissions`] API endpoint to return a list of permissions for the user robot: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/user/robots//permissions" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"repository": {"name": "busybox", "is_public": false}, "role": "write"}]} +---- \ No newline at end of file diff --git a/modules/robot-account-tokens.adoc b/modules/robot-account-tokens.adoc new file mode 100644 index 000000000..21fb10b49 --- /dev/null +++ b/modules/robot-account-tokens.adoc @@ -0,0 +1,17 @@ +:_content-type: REFERENCE +[id="robot-account-tokens"] += Robot account tokens + +Robot account _tokens_ are _password-type_ credentials used to access a {productname} registry via normal Docker v2 endpoints; these are defined as _tokens_ on the UI because the password itself is encrypted. + +Robot account tokens are persistent tokens designed for automation and continuous integration workflows. By default, {productname}'s robot account tokens do not expire and do not require user interaction, which makes robot accounts ideal for non-interactive use cases. + +Robot account tokens are automatically generated at the time of a robot's creation and are non-user specific; that is, they are connected to the user and organization namespace where they are created. For example, a robot named `project_tools+` is associated with the `project_tools` namespace. + +Robot account tokens provide access without needing a user's personal credentials.
How the robot account is configured, for example, with one of `READ`, `WRITE`, or `ADMIN` permissions, ultimately defines the actions that the robot account can take. + +Because robot account tokens are persistent and do not expire by default, they are ideal for automated workflows that require consistent access to {productname} without manual renewal. Despite this, robot account tokens can be easily re-generated by using the UI. They can also be regenerated by using the proper API endpoint via the CLI. To enhance the security of your {productname} deployment, administrators should regularly refresh robot account tokens. Additionally, with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#keyless-authentication-robot-accounts[_keyless authentication with robot accounts_] feature, robot account tokens can be exchanged for external OIDC tokens and leveraged so that they only last one hour, enhancing the security of your registry. + +When a namespace gets deleted, or when the robot account itself is deleted, the tokens are garbage collected when the collector is scheduled to run. + +The following section shows you how to use the API to re-generate a robot account token for organization robots and user robots. \ No newline at end of file diff --git a/modules/role-based-access-control-intro.adoc b/modules/role-based-access-control-intro.adoc index ae41696e2..73a888800 100644 --- a/modules/role-based-access-control-intro.adoc +++ b/modules/role-based-access-control-intro.adoc @@ -1,15 +1,15 @@ -[[role-based-access-control]] -= Role-based access control (RBAC) +[id="role-based-access-control"] += {productname} permissions model -{productname} offers three types of permissions: +{productname}'s permission model provides fine-grained access control over repositories and the content of those repositories, helping ensure secure collaboration and automation.
{productname} administrators can grant users and robot accounts one of the following levels of access: -* `Read`, which allows users, robots, and teams to pull images. -* `Write`, which allows users, robots, and teams to push images. -* `Admin`, which provides users, robots, and teams with administrative privileges. +* *Read*: Allows users, robots, and teams to pull images. +* *Write*: Allows users, robots, and teams to push images. +* *Admin*: Provides users, robots, and teams administrative privileges. [NOTE] ==== Administrative users can delegate new permissions for existing users and teams, change existing permissions, and revoke permissions when necessary ==== -Permissions can be delegated across the entire organization and on specific repositories. For example, `Read` permissions can be set to a specific team within the organization, while `Admin` permissions can be given to all users across all repositories within the organization. +Collectively, these levels of access provide users or robot accounts the ability to perform specific tasks, like pulling images, pushing new versions of an image into the registry, or managing the settings of a repository. These permissions can be delegated across the entire organization and on specific repositories. For example, *Read* permissions can be set to a specific team within the organization, while *Admin* permissions can be given to all users across all repositories within the organization. 
diff --git a/modules/root-rule-config-api-example.adoc b/modules/root-rule-config-api-example.adoc index 5663341bc..7da3016c6 100644 --- a/modules/root-rule-config-api-example.adoc +++ b/modules/root-rule-config-api-example.adoc @@ -1,7 +1,7 @@ :_content-type: CONCEPT [id="root-rule-config-api-example"] -=== rule_rule object reference += rule_rule object reference [source,yaml] ---- diff --git a/modules/rotating-log-files.adoc b/modules/rotating-log-files.adoc new file mode 100644 index 000000000..b7c79256b --- /dev/null +++ b/modules/rotating-log-files.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="rotating-log-files"] += Rotating log files of {productname} containers + +In some cases, users have found that the log rotation configurations for Nginx within the {productname} image do not function as intended. This leads to the logs not being rotated properly. + +{productname} containers do not have a `logrotate` binary. Nginx logs are forwarded to `/dev/stdout` by default. The log rotation for these logs is governed by the container runtime that you are using. + +To address the log rotation for {productname} container logs, you must configure log rotation at the container runtime level. Refer to the documentation or configuration options of your container runtime to set up log rotation for {productname} container logs accordingly. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6974691[How to rotate log files of Quay containers?]. \ No newline at end of file diff --git a/modules/running-ldap-debug-mode.adoc b/modules/running-ldap-debug-mode.adoc new file mode 100644 index 000000000..2e7407a43 --- /dev/null +++ b/modules/running-ldap-debug-mode.adoc @@ -0,0 +1,26 @@ +:_content-type: PROCEDURE +[id="running-ldap-debug-mode"] += Running an LDAP {productname} deployment in debug mode + +Use the following procedure to run an LDAP deployment of {productname} in debug mode.
+ +.Procedure + +. Enter the following command to run your LDAP {productname} deployment in debug mode: ++ +[source,terminal] +---- +$ podman run -p 443:8443 -p 80:8080 -e DEBUGLOG=true -e USERS_DEBUG=1 -v /config:/conf/stack -v /storage:/datastorage -d {productrepo}/{quayimage}:{productminv} +---- + +. To view the debug logs, enter the following command: ++ +[source,terminal] +---- +$ podman logs +---- ++ +[IMPORTANT] +==== +Setting `USERS_DEBUG=1` exposes credentials in clear text. This variable should be removed from the {productname} deployment after debugging. The log file that is generated with this environment variable should be scrutinized, and passwords should be removed before sending to other users. Use with caution. +==== \ No newline at end of file diff --git a/modules/running-operator-debug-mode.adoc b/modules/running-operator-debug-mode.adoc new file mode 100644 index 000000000..e947ad7c7 --- /dev/null +++ b/modules/running-operator-debug-mode.adoc @@ -0,0 +1,29 @@ +:_content-type: PROCEDURE +[id="running-operator-debug-mode"] += Running the {productname} Operator in debug mode + +Use the following procedure to run the {productname} Operator in debug mode. + +.Procedure + +. Enter the following command to edit the `QuayRegistry` custom resource definition: ++ +[source,terminal] +---- +$ oc edit quayregistry -n +---- + +. Update the `QuayRegistry` to add the following parameters: ++ +[source,yaml] +---- +spec: + - kind: quay + managed: true + overrides: + env: + - name: DEBUGLOG + value: "true" +---- + +. After the {productname} Operator has restarted with debugging enabled, try pulling an image from the registry. If it is still slow, dump all logs from all `Quay` pods to a file, and check the files for more information. 
\ No newline at end of file diff --git a/modules/running-quay-debug-mode-intro.adoc b/modules/running-quay-debug-mode-intro.adoc new file mode 100644 index 000000000..881395a2b --- /dev/null +++ b/modules/running-quay-debug-mode-intro.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="running-quay-debug-mode-intro"] += Running {productname} in debug mode + +Red Hat recommends gathering your debugging information when opening a support case. Running {productname} in debug mode provides verbose logging to help administrators find more information about various issues. Enabling debug mode can speed up the process to reproduce errors and validate a solution for things like geo-replication deployments, Operator deployments, standalone {productname} deployments, object storage issues, and so on. Additionally, it helps Red Hat Support perform a root cause analysis. + +[id="debug-configuration-fields"] +== {productname} debug variables + +{productname} offers two configuration fields that can be added to your `config.yaml` file to help diagnose issues or help obtain log information. + +.Debug configuration variables +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description +| **DEBUGLOG** | Boolean | Whether to enable or disable debug logs. Must be `true` or `false`. +| **USERS_DEBUG** |Integer. Either `0` or `1`. | Used to debug LDAP operations in clear text, including passwords. Must be used with `DEBUGLOG=true`. + +[IMPORTANT] +==== +Setting `USERS_DEBUG=1` exposes credentials in clear text. This variable should be removed from the {productname} deployment after debugging. The log file that is generated with this environment variable should be scrutinized, and passwords should be removed before sending to other users. Use with caution.
+==== +|=== \ No newline at end of file diff --git a/modules/running-quay-debug-mode.adoc b/modules/running-quay-debug-mode.adoc new file mode 100644 index 000000000..fc8c380ea --- /dev/null +++ b/modules/running-quay-debug-mode.adoc @@ -0,0 +1,23 @@ +:_content-type: PROCEDURE +[id="running-standalone-debug-mode"] += Running a standalone {productname} deployment in debug mode + +Running {productname} in debug mode provides verbose logging to help administrators find more information about various issues. Enabling debug mode can speed up the process to reproduce errors and validate a solution. + +Use the following procedure to run a standalone deployment of {productname} in debug mode. + +.Procedure + +. Enter the following command to run your standalone {productname} deployment in debug mode: ++ +[source,terminal] +---- +$ podman run -p 443:8443 -p 80:8080 -e DEBUGLOG=true -v /config:/conf/stack -v /storage:/datastorage -d {productrepo}/{quayimage}:{productminv} +---- + +. To view the debug logs, enter the following command: ++ +[source,terminal] +---- +$ podman logs +---- \ No newline at end of file diff --git a/modules/scans-not-working-behind-proxy.adoc b/modules/scans-not-working-behind-proxy.adoc new file mode 100644 index 000000000..40fd525f1 --- /dev/null +++ b/modules/scans-not-working-behind-proxy.adoc @@ -0,0 +1,34 @@ +:_content-type: PROCEDURE +[id="scans-not-working-behind-proxy"] += Clair scans are not working behind proxy + +In some cases, {productname} debug logs return a `401` error when interacting with Clair through a proxy, which suggests that {productname} is unable to communicate with Clair. For example: `securityworker stdout | 2022-11-08 14:32:52,443 [106] [DEBUG] [urllib3.connectionpool] http://192.168.xx.xx:3128 "GET http://clairv4/indexer/api/v1/index_state HTTP/1.1" 401 843 securityworker stdout | 2022-11-08 14:32:52,474 [106] [ERROR] [util.secscan.v4.api] Security scanner endpoint responded with non-200 HTTP status code: 401`. 
+ +This issue occurs because {productname} inherited the cluster proxy configuration from {ocp} and attempted to connect with Clair through the proxy, which results in the aforementioned error code. + +To resolve this issue, remove any proxy variables from the `QuayRegistry` custom resource definition (CRD) to keep {productname} unproxied. For example: + +[source,yaml] +---- +kind: QuayRegistry +components: + - kind: quay + managed: true +overrides: + env: + - name: DEBUGLOG + value: "true" + - name: NO_PROXY + value: svc.cluster.local,localhost,quay.example.com + - name: HTTP_PROXY + value: "" + - name: HTTPS_PROXY + value: "" +---- + +You must set the proxy variables for Clair. Proxy variables can be copied from {ocp}'s `cluster proxy` file. Add the full Clair service name to `NO_PROXY` in the `QuayRegistry` CRD. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6988319[Clair scans are not working behind proxy]. \ No newline at end of file diff --git a/modules/search-api.adoc b/modules/search-api.adoc new file mode 100644 index 000000000..475f547e8 --- /dev/null +++ b/modules/search-api.adoc @@ -0,0 +1,57 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="search-api"] += Searching against registry context + +You can use `search` API endpoints to perform searches against all registry context.
+ +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#conductreposearch[`GET /api/v1/find/repositories`] endpoint to get a list of apps and repositories that match the specified query: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/repositories?query=&page=1&includeUsage=true" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"results": [], "has_additional": false, "page": 2, "page_size": 10, "start_index": 10} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#conductsearch[`GET /api/v1/find/all`] endpoint to get a list of entities and resources that match the specified query: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/all?query=" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"results": [{"kind": "repository", "title": "repo", "namespace": {"title": "user", "kind": "user", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "name": "quayadmin", "score": 1, "href": "/user/quayadmin"}, "name": "busybox", "description": null, "is_public": false, "score": 4.0, "href": "/repository/quayadmin/busybox"}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getmatchingentities[`GET /api/v1/entities/{prefix}`] endpoint to get a list of entities that match the specified prefix. 
++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/entities/?includeOrgs=&includeTeams=&namespace=" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"results": [{"name": "quayadmin", "kind": "user", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}}]} +---- \ No newline at end of file diff --git a/modules/secrets-garbage-collected.adoc b/modules/secrets-garbage-collected.adoc new file mode 100644 index 000000000..bc3fe5e42 --- /dev/null +++ b/modules/secrets-garbage-collected.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="secrets-garbage-collected"] += Garbage collecting secrets on the {productname} Operator + +When a change to your `config.yaml` file is made, some secrets are created, but the old ones might remain in the namespace. This happens because on every reconciliation, the {productname} Operator recreates all secrets, including the PostgreSQL password, the config editor password, and the {productname} configuration itself. After a few changes, the number of secrets in the namespace grows to the point where it is difficult to tell which secret is being used, and where. Consequently, it can complicate the debugging process and cause other issues with your deployment. + +There is no automatic mechanism to prune secrets in a {productname} Operator deployment. As a workaround, you can locate and manually delete secrets that are not in use by other resources by running the following command: + +[source,terminal] +---- +$ oc delete secret +---- + + +[role="_additional-resources"] +.Additional resources + +For more information, see the following resources: + +* link:https://issues.redhat.com/browse/PROJQUAY-5172[PROJQUAY-5172]. + +* link:https://access.redhat.com/solutions/6974476[When using the Quay Operator, Secrets don't get garbage collected]. 
\ No newline at end of file diff --git a/modules/security-intro.adoc b/modules/security-intro.adoc index ad40c3830..880c83127 100644 --- a/modules/security-intro.adoc +++ b/modules/security-intro.adoc @@ -14,7 +14,7 @@ Clair supports the extraction of contents and assignment of vulnerabilities from * Oracle Linux * Alpine Linux * Amazon Linux -* VMWare Photon +* VMware Photon * Python diff --git a/modules/security-scanning-api.adoc b/modules/security-scanning-api.adoc new file mode 100644 index 000000000..caab437ba --- /dev/null +++ b/modules/security-scanning-api.adoc @@ -0,0 +1,24 @@ +:_content-type: CONCEPT +[id="security-scanning-api"] += View Clair security scans by using the API + +You can view Clair security scans by using the API. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepomanifestsecurity[`GET /api/v1/repository/{repository}/manifest/{manifestref}/security`] endpoint to retrieve security information about a specific manifest in a repository. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https://quay-server.example.com/api/v1/repository///manifest//security?vulnerabilities=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"status": "queued", "data": null} +---- \ No newline at end of file diff --git a/modules/security-scanning-ui.adoc b/modules/security-scanning-ui.adoc new file mode 100644 index 000000000..80fec1bc7 --- /dev/null +++ b/modules/security-scanning-ui.adoc @@ -0,0 +1,23 @@ +:_content-type: CONCEPT +[id="security-scanning-ui"] += Viewing Clair security scans by using the UI + +You can view Clair security scans on the UI. + +.Procedure + +. Navigate to a repository and click *Tags* in the navigation pane. This page shows the results of the security scan. + +. 
To reveal more information about multi-architecture images, click *See Child Manifests* to see the list of manifests in extended view. + +. Click a relevant link under *See Child Manifests*, for example, *1 Unknown* to be redirected to the *Security Scanner* page. + +. The *Security Scanner* page provides information for the tag, such as which CVEs the image is susceptible to, and what remediation options you might have available. + +[NOTE] +==== +Image scanning only lists vulnerabilities found by Clair security scanner. What users do about the vulnerabilities that are uncovered is up to said user. +ifeval::["{context}" == "use-quay"] +{productname} superusers do not act on found vulnerabilities. +endif::[] +==== \ No newline at end of file diff --git a/modules/security-scanning.adoc b/modules/security-scanning.adoc new file mode 100644 index 000000000..3d380a316 --- /dev/null +++ b/modules/security-scanning.adoc @@ -0,0 +1,30 @@ +:_content-type: CONCEPT +[id="security-scanning"] += Clair security scans + +ifeval::["{context}" == "quay-io"] +{quayio} comes equipped with Clair security scanner. For more information about Clair on {quayio}, see "Clair security scanner." +endif::[] +ifeval::["{context}" == "use-quay"] +Clair security scanner is not enabled for {productname} by default. To enable Clair, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/testing-clair-with-quay[Clair on {productname}]. +endif::[] + +Clair security scans can be viewed on the UI, or by the API. + +.Procedure + +. Navigate to a repository and click *Tags* in the navigation pane. This page shows the results of the security scan. + +. To reveal more information about multi-architecture images, click *See Child Manifests* to see the list of manifests in extended view. + +. Click a relevant link under *See Child Manifests*, for example, *1 Unknown* to be redirected to the *Security Scanner* page. + +.
The *Security Scanner* page provides information for the tag, such as which CVEs the image is susceptible to, and what remediation options you might have available. + +[NOTE] +==== +Image scanning only lists vulnerabilities found by Clair security scanner. What users do about the vulnerabilities that are uncovered is up to said user. +ifeval::["{context}" == "use-quay"] +{productname} superusers do not act on found vulnerabilities. +endif::[] +==== \ No newline at end of file diff --git a/modules/set-team-role.adoc b/modules/set-team-role.adoc new file mode 100644 index 000000000..72f428d5d --- /dev/null +++ b/modules/set-team-role.adoc @@ -0,0 +1,32 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="set-team-role"] += Setting a team role by using the UI + +After you have created a team, you can set the role of that team within the +Organization. + +.Prerequisites + +* You have created a team. + +.Procedure + +. On the {productname} landing page, click the name of your Organization. + +. In the navigation pane, click *Teams and Membership*. + +. Select the *TEAM ROLE* drop-down menu, as shown in the following figure: ++ +image:set-team-role.png[Set the role that a team has within an organization] + +. For the selected team, choose one of the following roles: ++ +* *Admin*. Full administrative access to the organization, including the ability to create teams, add members, and set permissions. +* *Member*. Inherits all permissions set for the team. +* *Creator*. All member permissions, plus the ability to create new repositories.
\ No newline at end of file diff --git a/modules/setting-default-quota.adoc b/modules/setting-default-quota.adoc new file mode 100644 index 000000000..71e341708 --- /dev/null +++ b/modules/setting-default-quota.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="default-quota"] += Setting default quota + +To specify a system-wide default storage quota that is applied to every organization and user, you can use the *DEFAULT_SYSTEM_REJECT_QUOTA_BYTES* configuration flag. + +If you configure a specific quota for an organization or user, and then delete that quota, the system-wide default quota will apply if one has been set. Similarly, if you have configured a specific quota for an organization or user, and then modify the system-wide default quota, the updated system-wide default will override any specific settings. + +For more information about the `DEFAULT_SYSTEM_REJECT_QUOTA_BYTES` flag, + +//need link for 3.9 + +see link: diff --git a/modules/setting-role-of-team-within-organization-api.adoc b/modules/setting-role-of-team-within-organization-api.adoc new file mode 100644 index 000000000..ccb04b19b --- /dev/null +++ b/modules/setting-role-of-team-within-organization-api.adoc @@ -0,0 +1,54 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// * quay_io/master.adoc + +:_content-type: PROCEDURE + +[id="setting-role-of-team-within-organization-api"] += Setting the role of a team within an organization by using the API + +Use the following procedure to view and set the role of a team within an organization using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +.
Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/red_hat_quay_api_reference/index#getorganizationteampermissions[`GET /api/v1/organization/{orgname}/team/{teamname}/permissions`] command to return a list of repository permissions for the organization's team. Note that your team must have been added to a repository for this command to return information. ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//permissions" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"repository": {"name": "api-repo", "is_public": true}, "role": "admin"}]} +---- + +. You can create or update a team within an organization to have a specified role of *admin*, *member*, or *creator* using the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/red_hat_quay_api_reference/index#updateorganizationteam[`PUT /api/v1/organization/{orgname}/team/{teamname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "role": "" + }' \ + "/api/v1/organization//team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "testteam", "description": "", "can_view": true, "role": "creator", "avatar": {"name": "testteam", "hash": "827f8c5762148d7e85402495b126e0a18b9b168170416ed04b49aae551099dc8", "color": "#ff7f0e", "kind": "team"}, "new_team": false} +---- \ No newline at end of file diff --git a/modules/setting-tag-expiration-api.adoc b/modules/setting-tag-expiration-api.adoc new file mode 100644 index 000000000..389312d8c --- /dev/null +++ b/modules/setting-tag-expiration-api.adoc @@ -0,0 +1,32 @@ +:_content-type: CONCEPT +[id="setting-tag-expirations-api"] += Setting tag expirations by using the API + +Image tags can be set to expire by using the API. 
+ +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* You can set when an image tag expires by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changetag[`PUT /api/v1/repository/{repository}/tag/{tag}`] command and passing in the expiration field: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "expiration": "" + }' \ + https:///api/v1/repository///tag/ +---- ++ +.Example output ++ +[source,terminal] +---- +"Updated" +---- diff --git a/modules/setting-tag-expirations-v2-ui.adoc b/modules/setting-tag-expirations-v2-ui.adoc new file mode 100644 index 000000000..ae25965b5 --- /dev/null +++ b/modules/setting-tag-expirations-v2-ui.adoc @@ -0,0 +1,190 @@ +:_content-type: CONCEPT +[id="setting-tag-expirations-v2-ui"] += Setting tag expirations + +Image tags can be set to expire from a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +repository at a chosen date and time using the _tag expiration_ feature. This feature includes the following characteristics: + +* When an image tag expires, it is deleted from the repository. If it is the last tag for a specific image, the image is also set to be deleted. + +* Expiration is set on a per-tag basis. It is not set for a repository as a whole. + +* After a tag is expired or deleted, it is not immediately removed from the registry. This is contingent upon the allotted time designated in the _time machine_ feature, which defines when the tag is permanently deleted, or garbage collected.
By default, this value is set at _14 days_, however the administrator can adjust this time to one of multiple options. Up until the point that garbage collection occurs, tags changes can be reverted. + +ifeval::["{context}" == "use-quay"] +The {productname} superuser has no special privilege related to deleting expired images from user repositories. There is no central mechanism for the superuser to gather information and act on user repositories. It is up to the owners of each repository to manage expiration and the deletion of their images. +endif::[] + +Tag expiration can be set up in one of three ways: + +* By setting the `quay.expires-after=` label in the Dockerfile when the image is created. This sets a time to expire from when the image is built. This label only works for image manifests. + +* By setting the `quay.expires-after=` annotation label in the Dockerfile when the image is created. `--annotation` can be passed in for both image manifests and image indexes. + +* By selecting an expiration date on the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. For example: ++ +image:tag-expires-ui.png[Change tag expiration under the Options icon or from the EXPIRES column] + +Setting tag expirations can help automate the cleanup of older or unused tags, helping to reduce storage space. + +[id="setting-tag-expiration-using-ui"] +== Setting tag expiration from a repository + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab for an image and select *Change expiration*. + +. Optional. Alternatively, you can bulk add expiration dates by clicking the box of multiple tags, and then select *Actions* -> *Set expiration*. + +. In the *Change Tags Expiration* window, set an expiration date, specifying the day of the week, month, day of the month, and year. 
For example, `Wednesday, November 15, 2023`. Alternatively, you can click the calendar button and manually select the date. + +. Set the time, for example, `2:30 PM`. + +. Click *Change Expiration* to confirm the date and time. The following notification is returned: `Successfully set expiration for tag test to Nov 15, 2023, 2:26 PM`. + +. On the {productname} v2 UI *Tags* page, you can see when the tag is set to expire. For example: ++ +image:tag-expiration-v2-ui.png[{productname} v2 UI tag expiration] + +[id="setting-tag-expiration-from-dockerfile"] +== Setting tag expiration from a Dockerfile + +You can add a label, for example, `quay.expires-after=20h` to an image tag by using the `docker label` command to cause the tag to automatically expire after the time that is indicated. The following values for hours, days, or weeks are accepted: + +* `1h` +* `2d` +* `3w` + +Expiration begins from the time that the image is pushed to the registry. + +.Procedure + +* Enter the following `docker label` command to add a label to the desired image tag. The label should be in the format `quay.expires-after=20h` to indicate that the tag should expire after 20 hours. Replace `20h` with the desired expiration time. For example: ++ +[source,terminal] +---- +$ docker label quay.expires-after=20h quay-server.example.com/quayadmin/: +---- + +[id="setting-tag-expiration-annotation"] +== Setting tag expiration using annotations + +You can add an annotation, for example, `quay.expires-after=20h`, to an image tag using the `--annotation` flag when pushing an image to the registry. This annotation causes the tag to automatically expire after the specified time. The annotation can be applied to both image manifests and image indexes. The following values for hours, days, or weeks are accepted: + +* `1h` +* `2d` +* `3w` + +Expiration begins from the time that the image is pushed to the registry. + +[NOTE] +==== +Using the `--annotation` flag is simplest using the `oras` CLI tool. 
+==== + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. + +.Procedure + +. Enter the following `oras push --annotation` command to add an annotation to the desired image tag. The annotation should be in the format `quay.expires-after=` to indicate that the tag should expire the set time. For example: ++ +[source,terminal] +---- +$ oras push --annotation quay.expires-after= \ + //: \ + : +---- ++ +.Example output ++ +[source,terminal] +---- +✓ Uploaded hello.txt 12/12 B 100.00% 321ms + └─ sha256:74b9e308133afb3bceae961097cb2aa481483869d695ce1414cd2bc7f046027c +✓ Uploaded application/vnd.oci.empty.v1+json 2/2 B 100.00% 328ms + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 620/620 B 100.00% 0s + └─ sha256:c370e931b5eca44fd753bd92e6991ed3be70008e8df15078083359409111f8c3 +Pushed [registry] quay-server.example.com/fortestuser/busybox:test2 +ArtifactType: application/vnd.unknown.artifact.v1 +---- + +. 
Confirm that the expiration date has been applied by checking the {productname} UI, or by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https:///api/v1/repository///tag/?specificTag=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test2", "reversion": false, "start_ts": 1743706344, "end_ts": 1743778344, "manifest_digest": "sha256:c370e931b5eca44fd753bd92e6991ed3be70008e8df15078083359409111f8c3", "is_manifest_list": false, "size": 12, "last_modified": "Thu, 03 Apr 2025 18:52:24 -0000", "expiration": "Fri, 04 Apr 2025 14:52:24 -0000"}, {"name": "test2", "reversion": false, "start_ts": 1742493776, "end_ts": 1743706344, "manifest_digest": "sha256:d80aa3d7f5f5388cfae543b990d3cd3d47ff51c48ef29ff66102427bf7bc0a88", "is_manifest_list": false, "size": 2266046, "last_modified": "Thu, 20 Mar 2025 18:02:56 -0000", "expiration": "Thu, 03 Apr 2025 18:52:24 -0000"}], "page": 1, "has_additional": false} +---- + +[id="removing-tag-expiration-annotation"] +== Removing tag expiration using annotations + +With the `oras` CLI tool, you can unset previously established expiration times. + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. +* You have pushed an image with the `quay.expires-after=` annotation. + +.Procedure + +. Enter the following `oras push --annotation` command to remove an annotation from the desired image tag. The annotation should be in the format `quay.expires-after=never`. 
For example: ++ +[source,terminal] +---- +$ oras push --annotation quay.expires-after=never \ + //: \ + : +---- ++ +.Example output ++ +[source,terminal] +---- +✓ Uploaded hello.txt 12/12 B 100.00% 321ms + └─ sha256:74b9e308133afb3bceae961097cb2aa481483869d695ce1414cd2bc7f046027c +✓ Uploaded application/vnd.oci.empty.v1+json 2/2 B 100.00% 328ms + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 620/620 B 100.00% 0s + └─ sha256:c370e931b5eca44fd753bd92e6991ed3be70008e8df15078083359409111f8c3 +Pushed [registry] quay-server.example.com/fortestuser/busybox:test2 +ArtifactType: application/vnd.unknown.artifact.v1 +---- + +. The latest manifest will no longer have an expiration time. Confirm that the expiration date has been removed by checking the {productname} UI, or by entering the following command: ++ +[source,terminal] +---- +{"tags": [{"name": "test2", "reversion": false, "start_ts": 1743708135, "manifest_digest": "sha256:19e3a3501b4125cce9cb6bb26ac9207c325259bef94dc66490b999f93c4c83a9", "is_manifest_list": false, "size": 12, "last_modified": "Thu, 03 Apr 2025 19:22:15 -0000"}, {"name": "test2", "reversion": false, "start_ts": 1743706344, "end_ts": 1743708135}]} +---- ++ +Note that no expiration time is listed. \ No newline at end of file diff --git a/modules/setting-up-builds-aws.adoc b/modules/setting-up-builds-aws.adoc new file mode 100644 index 000000000..7f3f4e05d --- /dev/null +++ b/modules/setting-up-builds-aws.adoc @@ -0,0 +1,41 @@ +:_content-type: PROCEDURE +[id="setting-up-builds-aws"] += {productname} Builder configuration with Amazon Elastic Compute Cloud + +{productname} can also be configured to use Amazon Elastic Compute Cloud (EC2) instances as _build worker_ nodes. This is useful for situations where you might want to have EC2 based _builds_ available as a backup solution in the event that your {ocp} _build workers_ are overloaded or unavailable. 
+ +[NOTE] +==== +Amazon EC2 _builds_ are not supported by Red{nbsp}Hat. This is currently provided as an upstream feature only. +==== + +You can follow the steps in "Configuring bare metal builds for {productname-ocp}" and substitute the following changes in your configuration bundle to enable Amazon EC2. + +.Example configuration for bare metal builds with Amazon EC2 +[source,yaml] +---- + EXECUTORS: + - EXECUTOR: ec2 + QUAY_USERNAME: + QUAY_PASSWORD: + WORKER_IMAGE: quay.io/quay/quay-builder + WORKER_TAG: latest + EC2_REGION: us-east-1 + COREOS_AMI: ami-02545325b519192df # Fedora CoreOS <1> + AWS_ACCESS_KEY: ***** + AWS_SECRET_KEY: ***** + EC2_INSTANCE_TYPE: t2.large + EC2_VPC_SUBNET_ID: + EC2_SECURITY_GROUP_IDS: + - + EC2_KEY_NAME: + BLOCK_DEVICE_SIZE: 58 + SSH_AUTHORIZED_KEYS: <2> + - + - + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: +---- +<1> Specifies an AMI name where _builds_ will be run. Unlike bare metal _builds_, these container _builds_ are done directly within an ephemeral EC2 instance. This AMI must utilize ignition and contain a docker. The AMI shown in this example is used by {quay.io} for its build system. +<2> Allows public SSH keys to be added to the build environment for remote troubleshooting access. This key, or keys, should correspond to the private key that an admin or developer will use to SSH into the build worker for debugging purposes. This key can be obtained by establishing an SSH connection to the remote host using a specific SSH key and port. For example: `$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost`. 
\ No newline at end of file diff --git a/modules/signature-does-not-exist.adoc b/modules/signature-does-not-exist.adoc new file mode 100644 index 000000000..b642dc750 --- /dev/null +++ b/modules/signature-does-not-exist.adoc @@ -0,0 +1,14 @@ +:_content-type: PROCEDURE +[id="signature-does-not-exist"] += Mirroring fails with "signature does not exist" error + +During the mirroring process in {productname}, users encounter a failure with the error message `signature does not exist`. This issue arises when attempting to mirror images and prevents their successful replication. + +The cause of this issue lies in the presence of old unsigned images on `registry.redhat.io`. Due to their lack of proper signatures, these images are unable to be mirrored successfully. + +This issue was addressed in {productname} version 3.6.4. The fix implemented in this version introduced a new checkbox named `Accept Unsigned Images` under the *Mirror Configuration* tab. Enabling this checkbox allows the mirroring process to proceed successfully, bypassing the `signature does not exist` error. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6804261[Red Hat Quay Image mirroring fails with error "signature does not exist"]. \ No newline at end of file diff --git a/modules/skipping-source-control-triggered-build.adoc b/modules/skipping-source-control-triggered-build.adoc new file mode 100644 index 000000000..a9a7f3e84 --- /dev/null +++ b/modules/skipping-source-control-triggered-build.adoc @@ -0,0 +1,11 @@ +[id="skipping-source-control-triggered-build"] += Skipping a source control-triggered build + +To specify that a commit should be ignored by the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +build system, add the text `[skip build]` or `[build skip]` anywhere in your commit message. 
\ No newline at end of file diff --git a/modules/ssl-config-cli.adoc b/modules/ssl-config-cli.adoc index 952ae2750..9514d7bf1 100644 --- a/modules/ssl-config-cli.adoc +++ b/modules/ssl-config-cli.adoc @@ -1,34 +1,64 @@ -= Configuring SSL using the command line -Another option when configuring SSL is to use the command line interface. +:_content-type: PROCEDURE +[id="configuring-ssl-using-cli"] += Configuring custom SSL/TLS certificates by using the command line interface + +SSL/TLS must be configured by using the command-line interface (CLI) and updating your `config.yaml` file manually. + +.Prerequisites + +* You have created a certificate authority and signed the certificate. + +.Procedure . Copy the certificate file and primary key file to your configuration directory, ensuring they are named `ssl.cert` and `ssl.key` respectively: + -``` -$ cp ~/ssl.cert $QUAY/config -$ cp ~/ssl.key $QUAY/config -$ cd $QUAY/config -``` +[source,terminal] ++ +---- +cp ~/ssl.cert ~/ssl.key /path/to/configuration_directory +---- -. Edit the `config.yaml` file and specify that you want Quay to handle TLS: +. Navigate to the configuration directory by entering the following command: + -.config.yaml +[source,terminal] +---- +$ cd /path/to/configuration_directory +---- + +. Edit the `config.yaml` file and specify that you want {productname} to handle SSL/TLS: ++ +.Example `config.yaml` file [source,yaml] ---- -... -SERVER_HOSTNAME: quay-server.example.com +# ... +SERVER_HOSTNAME: ... PREFERRED_URL_SCHEME: https -... +# ... ---- -. Stop the `Quay` container and restart the registry: + +. Optional: Append the contents of the `rootCA.pem` file to the end of the `ssl.cert` file by entering the following command: ++ +[source,terminal] +---- +$ cat rootCA.pem >> ssl.cert +---- + +. Stop the `Quay` container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. 
Restart the registry by entering the following command: + [subs="verbatim,attributes"] ---- -$ sudo podman rm -f quay + $ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ --name=quay \ -v $QUAY/config:/conf/stack:Z \ -v $QUAY/storage:/datastorage:Z \ {productrepo}/{quayimage}:{productminv} ----- - +---- \ No newline at end of file diff --git a/modules/ssl-config-ui.adoc b/modules/ssl-config-ui.adoc index 602e9cda8..b73a20a05 100644 --- a/modules/ssl-config-ui.adoc +++ b/modules/ssl-config-ui.adoc @@ -1,17 +1,29 @@ -= Configuring SSL using the UI +:_content-type: PROCEDURE +[id="configuring-ssl-using-ui"] += Configuring SSL/TLS using the {productname} UI -This section configures SSL using the Quay UI. To configure SSL using the command line interface, see the following section. +Use the following procedure to configure SSL/TLS using the {productname} UI. + +To configure SSL/TLS using the command line interface, see "Configuring SSL/TLS using the command line interface". + +.Prerequisites + +* You have created a certificate authority and signed a certificate. + +.Procedure . Start the `Quay` container in configuration mode: + [subs="verbatim,attributes"] -``` +---- $ sudo podman run --rm -it --name quay_config -p 80:8080 -p 443:8443 {productrepo}/{quayimage}:{productminv} config secret -``` +---- + +. In the *Server Configuration* section, select *{productname} handles TLS* for SSL/TLS. Upload the certificate file and private key file created earlier, ensuring that the *Server Hostname* matches the value used when the certificates were created. -. In the Server Configuration section, select `{productname} handles TLS` for TLS. Upload the certificate file and private key file created earlier, ensuring that the Server Hostname matches the value used when creating the certs. Validate and download the updated configuration. +. Validate and download the updated configuration. -. Stop the `Quay` container and then restart the registry: +. 
Stop the `Quay` container and then restart the registry by entering the following command: + [subs="verbatim,attributes"] ---- diff --git a/modules/ssl-create-certs.adoc b/modules/ssl-create-certs.adoc index b3fafd0e4..a684579d0 100644 --- a/modules/ssl-create-certs.adoc +++ b/modules/ssl-create-certs.adoc @@ -1,62 +1,68 @@ -[[create-a-ca-and-sign-a-certificate]] -= Create a Certificate Authority and sign a certificate +:_content-type: PROCEDURE +[id="creating-a-certificate-authority"] += Creating a Certificate Authority -At the end of this procedure, you will have a certificate file and a primary key file named `ssl.cert` and `ssl.key`, respectively. +Use the following procedure to set up your own CA and use it to issue a server certificate for your domain. This allows you to secure communications with SSL/TLS using your own certificates. -== Create a Certificate Authority +.Procedure -. Generate the root CA key: +. Generate the root CA key by entering the following command: + -``` +[source,terminal] +---- $ openssl genrsa -out rootCA.key 2048 -``` +---- -. Generate the root CA cert: +. Generate the root CA certificate by entering the following command: + -``` +[source,terminal] +---- $ openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 1024 -out rootCA.pem -``` +---- . Enter the information that will be incorporated into your certificate request, including the server hostname, for example: + -``` +[source,terminal] +---- Country Name (2 letter code) [XX]:IE State or Province Name (full name) []:GALWAY Locality Name (eg, city) [Default City]:GALWAY Organization Name (eg, company) [Default Company Ltd]:QUAY Organizational Unit Name (eg, section) []:DOCS Common Name (eg, your name or your server's hostname) []:quay-server.example.com -``` - -== Sign a certificate +---- -. Generate the server key: +. Generate the server key by entering the following command: + -``` +[source,terminal] +---- $ openssl genrsa -out ssl.key 2048 -``` +---- -. 
Generate a signing request: +. Generate a signing request by entering the following command: + -``` +[source,terminal] +---- $ openssl req -new -key ssl.key -out ssl.csr -``` +---- . Enter the information that will be incorporated into your certificate request, including the server hostname, for example: + -``` +[source,terminal] +---- Country Name (2 letter code) [XX]:IE State or Province Name (full name) []:GALWAY Locality Name (eg, city) [Default City]:GALWAY Organization Name (eg, company) [Default Company Ltd]:QUAY Organizational Unit Name (eg, section) []:DOCS Common Name (eg, your name or your server's hostname) []:quay-server.example.com -``` +Email Address []: +---- -. Create a configuration file `openssl.cnf`, specifying the server hostname, for example: +. Create a configuration file `openssl.cnf`, specifying the server hostname, for example: + -.openssl.cnf -[source] +.Example `openssl.cnf` file +[source,terminal] ---- [req] req_extensions = v3_req @@ -67,13 +73,28 @@ basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment subjectAltName = @alt_names [alt_names] -DNS.1 = quay-server.example.com +DNS.1 = IP.1 = 192.168.1.112 ---- - . Use the configuration file to generate the certificate `ssl.cert`: + -``` +[source,terminal] +---- $ openssl x509 -req -in ssl.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out ssl.cert -days 356 -extensions v3_req -extfile openssl.cnf -``` +---- + +. 
Confirm your created certificates and files by entering the following command: ++ +[source,terminal] +---- +$ ls /path/to/certificates +---- ++ +.Example output ++ +[source,terminal] +---- +rootCA.key ssl-bundle.cert ssl.key custom-ssl-config-bundle-secret.yaml rootCA.pem ssl.cert +openssl.cnf rootCA.srl ssl.csr +---- \ No newline at end of file diff --git a/modules/ssl-intro.adoc b/modules/ssl-intro.adoc index 6fe30a74a..8dff4cf0a 100644 --- a/modules/ssl-intro.adoc +++ b/modules/ssl-intro.adoc @@ -1,13 +1,4 @@ -= Introduction to using SSL +[id="introduction-using-ssl"] += Using SSL/TLS -To configure {productname} with a -https://en.wikipedia.org/wiki/Self-signed_certificate[self-signed -certificate], you need to create a Certificate Authority (CA) and then generate the required key and certificate files. - -The following examples assume you have configured the server hostname `quay-server.example.com` using DNS or another naming mechanism, such as adding an entry in your `/etc/hosts` file: - -.... -$ cat /etc/hosts -... -192.168.1.112 quay-server.example.com -.... \ No newline at end of file +Documentation for _Using SSL/TLS_ has been revised and moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/3/html-single/securing_red_hat_quay/index[Securing {productname}]. This chapter will be removed in a future version of {productname}. \ No newline at end of file diff --git a/modules/ssl-testing-cli.adoc b/modules/ssl-testing-cli.adoc index b3e8da9f6..b196e3234 100644 --- a/modules/ssl-testing-cli.adoc +++ b/modules/ssl-testing-cli.adoc @@ -1,23 +1,39 @@ -= Testing SSL configuration using the command line +:_content-type: PROCEDURE +[id="testing-ssl-tls-configuration-using-cli"] += Testing the SSL/TLS configuration using the CLI -* Use the `podman login` command to attempt to log in to the Quay registry with SSL enabled: +Your SSL/TLS configuration can be tested by using the command-line interface (CLI). 
Use the following procedure to test your SSL/TLS configuration. + +Use the following procedure to test your SSL/TLS configuration using the CLI. + +.Procedure + +. Enter the following command to attempt to log in to the {productname} registry with SSL/TLS enabled: + -``` +[source,terminal] +---- $ sudo podman login quay-server.example.com -Username: quayadmin -Password: - +---- ++ +.Example output ++ +[source,terminal] +---- Error: error authenticating creds for "quay-server.example.com": error pinging docker registry quay-server.example.com: Get "https://quay-server.example.com/v2/": x509: certificate signed by unknown authority -``` +---- -* Podman does not trust self-signed certificates. As a workaround, use the `--tls-verify` option: +. Because Podman does not trust self-signed certificates, you must use the `--tls-verify=false` option: + -``` +[source,terminal] +---- $ sudo podman login --tls-verify=false quay-server.example.com -Username: quayadmin -Password: - +---- ++ +.Example output ++ +[source,terminal] +---- Login Succeeded! -``` - -Configuring Podman to trust the root Certificate Authority (CA) is covered in a subsequent section. +---- ++ +In a subsequent section, you will configure Podman to trust the root Certificate Authority. \ No newline at end of file diff --git a/modules/ssl-testing-ui.adoc b/modules/ssl-testing-ui.adoc index 4df150d2b..22360312b 100644 --- a/modules/ssl-testing-ui.adoc +++ b/modules/ssl-testing-ui.adoc @@ -1,11 +1,17 @@ -= Testing SSL configuration using the browser +:_content-type: PROCEDURE +[id="testing-ssl-tls-using-browser"] += Testing the SSL/TLS configuration using a browser -When you attempt to access the Quay registry, in this case, `https://quay-server.example.com`, the browser warns of the potential risk: +Use the following procedure to test your SSL/TLS configuration using a browser. 
-image:ssl-connection-not-private.png[Potential risk] +.Procedure -Proceed to the log in screen, and the browser will notify you that the connection is not secure: +. Navigate to your {productname} registry endpoint, for example, `https://quay-server.example.com`. If configured correctly, the browser warns of the potential risk: ++ +image:ssl-connection-not-private.png[Potential risk] +. Proceed to the log in screen. The browser notifies you that the connection is not secure. For example: ++ image:ssl-connection-not-secure.png[Connection not secure] - -Configuring the system to trust the root Certificate Authority (CA) is covered in the subsequent section. ++ +In the following section, you will configure Podman to trust the root Certificate Authority. \ No newline at end of file diff --git a/modules/ssl-tls-quay-overview.adoc b/modules/ssl-tls-quay-overview.adoc new file mode 100644 index 000000000..6fdaf4a6b --- /dev/null +++ b/modules/ssl-tls-quay-overview.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="ssl-tls-quay-overview"] += SSL and TLS for {productname} + +The Secure Sockets Layer (SSL) protocol was originally developed by Netscape Corporation to provide a mechanism for secure communication over the Internet. Subsequently, the protocol was adopted by the Internet Engineering Task Force (IETF) and renamed to Transport Layer Security (TLS). + +TLS (Transport Layer Security) is a cryptographic protocol used to secure network communications. When hardening system security settings by configuring preferred key-exchange protocols, authentication methods, and encryption algorithms, it is necessary to bear in mind that the broader the range of supported clients, the lower the resulting security. Conversely, strict security settings lead to limited compatibility with clients, which can result in some users being locked out of the system. Be sure to target the strictest available configuration and only relax it when it is required for compatibility reasons. 
+ +{productname} can be configured to use SSL/TLS certificates to ensure secure communication between clients and the {productname} server. This configuration involves the use of valid SSL/TLS certificates, which can be obtained from a trusted Certificate Authority (CA) or generated as self-signed certificates for internal use. \ No newline at end of file diff --git a/modules/ssl-tls-sql.adoc b/modules/ssl-tls-sql.adoc new file mode 100644 index 000000000..2859ce59f --- /dev/null +++ b/modules/ssl-tls-sql.adoc @@ -0,0 +1,7 @@ +:_content-type: PROCEDURE +[id="cert-based-auth-quay-sql"] += Certificate-based authentication between {productname} and SQL + +{productname} administrators can configure certificate-based authentication between {productname} and SQL (PostgreSQL and GCP CloudSQL) by supplying their own SSL/TLS certificates for client-side authentication. This provides enhanced security and allows for easier automation for your {productname} registry. + +The following sections shows you how to configure certificate-based authentication between {productname} and PostgreSQL, and {productname} and CloudSQL. \ No newline at end of file diff --git a/modules/ssl-trust-ca-podman.adoc b/modules/ssl-trust-ca-podman.adoc index d1f26fc53..1c1714854 100644 --- a/modules/ssl-trust-ca-podman.adoc +++ b/modules/ssl-trust-ca-podman.adoc @@ -1,24 +1,28 @@ -= Configuring podman to trust the Certificate Authority +:_content-type: PROCEDURE +[id="configuring-podman-trust-ca"] += Configuring Podman to trust the Certificate Authority -Podman uses two paths to locate the CA file, namely, `/etc/containers/certs.d/` and `/etc/docker/certs.d/`. +Podman uses two paths to locate the Certificate Authority (CA) file: `/etc/containers/certs.d/` and `/etc/docker/certs.d/`. Use the following procedure to configure Podman to trust the CA. -* Copy the root CA file to one of these locations, with the exact path determined by the server hostname, and naming the file `ca.crt`: +.Procedure + +. 
Copy the root CA file to one of `/etc/containers/certs.d/` or `/etc/docker/certs.d/`. Use the exact path determined by the server hostname, and name the file `ca.crt`: + -``` +[source,terminal] +---- $ sudo cp rootCA.pem /etc/containers/certs.d/quay-server.example.com/ca.crt -``` +---- -* Alternatively, if you are using Docker, you can copy the root CA file to the equivalent Docker directory: +. Verify that you no longer need to use the `--tls-verify=false` option when logging in to your {productname} registry: + -``` -$ sudo cp rootCA.pem /etc/docker/certs.d/quay-server.example.com/ca.crt -``` - -You should no longer need to use the `--tls-verify=false` option when logging in to the registry: -``` +[source,terminal] +---- $ sudo podman login quay-server.example.com - -Username: quayadmin -Password: +---- ++ +.Example output ++ +[source,terminal] +---- Login Succeeded! -``` +---- \ No newline at end of file diff --git a/modules/ssl-trust-ca-system.adoc b/modules/ssl-trust-ca-system.adoc index 21fee5d3c..dfcb82759 100644 --- a/modules/ssl-trust-ca-system.adoc +++ b/modules/ssl-trust-ca-system.adoc @@ -1,35 +1,52 @@ +:_content-type: PROCEDURE +[id="configuring-system-trust-ca"] = Configuring the system to trust the certificate authority -. Copy the root CA file to the consolidated system-wide trust store: +Use the following procedure to configure your system to trust the certificate authority. + +.Procedure + +. Enter the following command to copy the `rootCA.pem` file to the consolidated system-wide trust store: + -``` +[source,terminal] +---- $ sudo cp rootCA.pem /etc/pki/ca-trust/source/anchors/ -``` +---- -. Update the system-wide trust store configuration: +. Enter the following command to update the system-wide trust store configuration: + -``` +[source,terminal] +---- $ sudo update-ca-trust extract -``` +---- -. You can use the `trust list` command to ensure that the Quay server has been configured: +. Optional. 
You can use the `trust list` command to ensure that the `Quay` server has been configured: + -``` +[source,terminal] +---- $ trust list | grep quay label: quay-server.example.com -``` +---- + Now, when you browse to the registry at `https://quay-server.example.com`, the lock icon shows that the connection is secure: + image:ssl-connection-secure.png[Connection not secure] -. To remove the root CA from system-wide trust, delete the file and update the configuration: +. To remove the `rootCA.pem` file from system-wide trust, delete the file and update the configuration: + -``` +[source,terminal] +---- $ sudo rm /etc/pki/ca-trust/source/anchors/rootCA.pem +---- ++ +[source,terminal] +---- $ sudo update-ca-trust extract +---- ++ +[source,terminal] +---- $ trust list | grep quay -$ -``` +---- -More information can be found in the RHEL 8 documentation in the chapter https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/using-shared-system-certificates_security-hardening[Using shared system certificates]. +More information can be found in the RHEL 9 documentation in the chapter link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/securing_networks/index#using-shared-system-certificates_securing-networks[Using shared system certificates]. diff --git a/modules/standalone-georepl-site-removal.adoc b/modules/standalone-georepl-site-removal.adoc new file mode 100644 index 000000000..6055f424d --- /dev/null +++ b/modules/standalone-georepl-site-removal.adoc @@ -0,0 +1,105 @@ +:_content-type: PROCEDURE +[id="standalone-georepl-site-removal"] += Removing a geo-replicated site from your standalone {productname} deployment + +By using the following procedure, {productname} administrators can remove sites in a geo-replicated setup. + +.Prerequisites + +* You have configured {productname} geo-replication with at least two sites, for example, `usstorage` and `eustorage`. 
+* Each site has its own Organization, Repository, and image tags. + +.Procedure + +. Sync the blobs between all of your defined sites by running the following command: ++ +[source,terminal] +---- +$ python -m util.backfillreplication +---- ++ +[WARNING] +==== +Prior to removing storage engines from your {productname} `config.yaml` file, you *must* ensure that all blobs are synced between all defined sites. Complete this step before proceeding. +==== + +. In your {productname} `config.yaml` file for site `usstorage`, remove the `DISTRIBUTED_STORAGE_CONFIG` entry for the `eustorage` site. + +. Enter the following command to obtain a list of running containers: ++ +[source,terminal] +---- +$ podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +92c5321cde38 registry.redhat.io/rhel8/redis-5:1 run-redis 11 days ago Up 11 days ago 0.0.0.0:6379->6379/tcp redis +4e6d1ecd3811 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 33 seconds ago Up 34 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +d2eadac74fda registry-proxy.engineering.redhat.com/rh-osbs/quay-quay-rhel8:v3.9.0-131 registry 4 seconds ago Up 4 seconds ago 0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp quay +---- + +. Enter the following command to execute a shell inside of the PostgreSQL container: ++ +[source,terminal] +---- +$ podman exec -it postgresql-quay -- /bin/bash +---- + +. Enter psql by running the following command: ++ +[source,terminal] +---- +bash-4.4$ psql +---- + +. Enter the following command to reveal a list of sites in your geo-replicated deployment: ++ +[source,terminal] +---- +quay=# select * from imagestoragelocation; +---- ++ +.Example output ++ +[source,terminal] +---- + id | name +----+------------------- + 1 | usstorage + 2 | eustorage +---- + +. Enter the following command to exit the postgres CLI to re-enter bash-4.4: ++ +[source,terminal] +---- +\q +---- + +. 
Enter the following command to permanently remove the `eustorage` site: ++ +[IMPORTANT] +==== +The following action cannot be undone. Use with caution. +==== ++ +[source,terminal] +---- +bash-4.4$ python -m util.removelocation eustorage +---- ++ +.Example output ++ +[source,terminal] +---- +WARNING: This is a destructive operation. Are you sure you want to remove eustorage from your storage locations? [y/n] y +Deleted placement 30 +Deleted placement 31 +Deleted placement 32 +Deleted placement 33 +Deleted location eustorage +---- \ No newline at end of file diff --git a/modules/standalone-to-operator-backup-restore.adoc b/modules/standalone-to-operator-backup-restore.adoc index 4965104df..c6b81b42b 100644 --- a/modules/standalone-to-operator-backup-restore.adoc +++ b/modules/standalone-to-operator-backup-restore.adoc @@ -1,14 +1,15 @@ -= Migrating a standalone Quay deployment to a {productname} Operator managed deployment +:_content-type: REFERENCE +[id="migrating-standalone-quay-to-operator"] += Migrating a standalone {productname} deployment to a {productname} Operator deployment The following procedures allow you to back up a standalone {productname} deployment and migrate it to the {productname} Operator on OpenShift Container Platform. +[id="backing-up-standalone-deployment"] == Backing up a standalone deployment of {productname} -.Prerequisites - .Procedure -. Back up the Quay `config.yaml` of your standalone deployment: +. Back up the `config.yaml` of your standalone {productname} deployment: + [source,terminal] ---- @@ -16,7 +17,7 @@ $ mkdir /tmp/quay-backup $ cp /path/to/Quay/config/directory/config.yaml /tmp/quay-backup ---- -. Create a backup of the database that your standalone Quay deployment is using: +. Create a backup of the database that your standalone {productname} deployment is using: + [source,terminal] ---- @@ -32,7 +33,7 @@ $ pg_dump -h DB_HOST -p 5432 -d QUAY_DATABASE_NAME -U QUAY_DATABASE_USER -W -O > $ mkdir ~/.aws/ ---- -. 
Obtain the `access_key` and `secret_key` from the Quay `config.yaml` of your standalone deployment: +. Obtain the `access_key` and `secret_key` from the `config.yaml` of your standalone deployment: + [source,terminal] ---- @@ -55,7 +56,7 @@ DISTRIBUTED_STORAGE_CONFIG: storage_path: /datastorage/registry ---- -. Store the `access_key` and `secret_key` from the Quay `config.yaml` file in your `~/.aws` directory: +. Store the `access_key` and `secret_key` from the `config.yaml` file in your `~/.aws` directory: + [source,terminal] ---- @@ -102,18 +103,18 @@ $ aws s3 sync --no-verify-ssl --endpoint-url https://PUBLIC_S3_ENDPOINT:PORT s3: + [NOTE] ==== -The `PUBLIC_S3_ENDPOINT` can be read from the Quay `config.yaml` file under `hostname` in the `DISTRIBUTED_STORAGE_CONFIG`. If the endpoint is insecure, use `http` instead of `https` in the endpoint URL. +The `PUBLIC_S3_ENDPOINT` can be read from the {productname} `config.yaml` file under `hostname` in the `DISTRIBUTED_STORAGE_CONFIG`. If the endpoint is insecure, use `http` instead of `https` in the endpoint URL. ==== -Up to this point, you should have a complete backup of all Quay data, blobs, the database, and the `config.yaml` file stored locally. In the following section, you will migrate the standalone deployment backup to {productname} on OpenShift Container Platform. +Up to this point, you should have a complete backup of all {productname} data, blobs, the database, and the `config.yaml` file stored locally. In the following section, you will migrate the standalone deployment backup to {productname} on OpenShift Container Platform. +[id="using-standalone-content-migrate-ocp"] == Using backed up standalone content to migrate to OpenShift Container Platform. - .Prerequisites * Your standalone {productname} data, blobs, database, and `config.yaml` have been backed up. -* {productname} is deployed on OpenShift Container Platform using the Quay Operator. 
+* {productname} is deployed on OpenShift Container Platform using the {productname} Operator. * A `QuayRegistry` with all components set to `managed`. .Procedure @@ -137,7 +138,7 @@ $ oc scale --replicas=0 deployment quay-operator.v3.6.2 -n openshift-operators $ oc scale --replicas=0 deployment QUAY_MAIN_APP_DEPLOYMENT QUAY_MIRROR_DEPLOYMENT ---- -. Copy the database SQL backup to the Quay PostgreSQL database instance: +. Copy the database SQL backup to the `Quay` PostgreSQL database instance: + [source,terminal] ---- @@ -229,7 +230,7 @@ Example output: You are now connected to database "example-restore-registry-quay-database" as user "postgres". ---- -. Create a `pg_trmg` extension of your Quay database: +. Create a `pg_trmg` extension of your `Quay` database: + [source,terminal] ---- @@ -300,7 +301,7 @@ You must manually copy all the LDAP, OIDC and other information and add it to th $ oc create secret generic new-custom-config-bundle --from-file=config.yaml=/tmp/quay-backup/config-bundle.yaml ---- -. Scale up the Quay pods: +. Scale up the `Quay` pods: + ---- $ oc scale --replicas=1 deployment quayregistry-quay-app @@ -322,7 +323,7 @@ $ oc patch quayregistry QUAY_REGISTRY_NAME --type=merge -p '{"spec":{"configBund + [NOTE] ==== -If Quay returns a `500` internal server error, you might have to update the `location` of your `DISTRIBUTED_STORAGE_CONFIG` to `default`. +If {productname} returns a `500` internal server error, you might have to update the `location` of your `DISTRIBUTED_STORAGE_CONFIG` to `default`. ==== . Create a new AWS `credentials.yaml` in your `/.aws/` directory and include the `access_key` and `secret_key` from the Operator-created `config.yaml` file: @@ -372,4 +373,4 @@ $ aws s3 sync --no-verify-ssl --endpoint-url https://NOOBAA_PUBLIC_S3_ROUTE /tmp $ oc scale –replicas=1 deployment quay-operator.v3.6.4 -n openshift-operators ---- -The Operator will use the custom configuration bundle provided and will reconcile all secrets and deployments. 
Your new Quay deployment on OpenShift Container Platform should contain all of the information that the old deployment had. All images should be pull-able. +The Operator uses the custom configuration bundle provided and reconciles all secrets and deployments. Your new {productname} deployment on {ocp} should contain all of the information that the old deployment had. You should be able to pull all images. \ No newline at end of file diff --git a/modules/starting-a-build.adoc b/modules/starting-a-build.adoc new file mode 100644 index 000000000..7c256269c --- /dev/null +++ b/modules/starting-a-build.adoc @@ -0,0 +1,68 @@ +:_content-type: CONCEPT +[id="starting-a-build"] += Starting a new build + +ifeval::["{context}" == "quay-io"] +By default, {quayio} users can start new _builds_ out-of-the-box. +endif::[] + +ifeval::["{context}" == "quay-builders-image-automation"] +After you have enabled the {productname} _builds_ feature by configuring your deployment, you can start a new build by invoking a _build trigger_ or by uploading a Dockerfile. +endif::[] + +Use the following procedure to start a new _build_ by uploading a Dockerfile. For information about creating a _build trigger_, see "Build triggers". + +.Prerequisites + +* You have navigated to the *Builds* page of your repository. +ifeval::["{context}" == "quay-builders-image-automation"] +* You have configured your environment to use the _build_ feature. +endif::[] + +.Procedure + +. On the *Builds* page, click *Start New Build*. + +. When prompted, click *Upload Dockerfile* to upload a Dockerfile or an archive that contains a Dockerfile at the root directory. + +. Click *Start Build*. ++ +[NOTE] +==== +* Currently, users cannot specify the Docker build context when manually starting a build. +* Currently, BitBucket is unsupported on the {productname} v2 UI. +==== + +. You are redirected to the _build_, which can be viewed in real-time. Wait for the Dockerfile _build_ to be completed and pushed. + +. Optional. 
You can click *Download Logs* to download the logs, or *Copy Logs* to copy the logs. + +. Click the back button to return to the *Repository Builds* page, where you can view the _build history_. ++ +image:build-history.png[Build history v2 UI] + +ifeval::["{context}" == "quay-builders-image-automation"] +. You can check the status of your _build_ by clicking the commit in the *Build History* page, or by running the following command: ++ +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- + +. After the _build_ has completed, the `oc get pods -n virtual-builders` command returns no resources: ++ +[source,terminal] +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +No resources found in virtual-builders namespace. +---- +endif::[] \ No newline at end of file diff --git a/modules/storage-buckets-not-synced.adoc b/modules/storage-buckets-not-synced.adoc new file mode 100644 index 000000000..e4cb75b22 --- /dev/null +++ b/modules/storage-buckets-not-synced.adoc @@ -0,0 +1,14 @@ +:_content-type: PROCEDURE +[id="storage-buckets-not-synced"] += Unsynced storage buckets in a geo-replication environment + +In some cases, your s3 buckets might differ in size and the number of objects. This occurs because, over a period of time, {productname} registries are deleted. However, within {productname} there is no mechanism to ensure that a deleted image is entirely removed from the backing storage. Because of this, it is likely that many layers of such images are still in the backing storage and causing inconsistencies in all backing stores. + +`Replicationworkers` from the backfill script might take some time to catch up with the latest tasks, especially when images are consistently being pushed and new layers are being added to the registry. A difference in the size of backing s3 storage is common and not problematic.
However, in rare cases, it might lead to failed pulls due to layers of an image not being present in the `imagestoragelocation` table. + +Currently, there is no workaround for this issue. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/7010202[In Quay Geo-Replication, Storage Buckets not synced]. \ No newline at end of file diff --git a/modules/storage-health-check-geo-repl.adoc b/modules/storage-health-check-geo-repl.adoc new file mode 100644 index 000000000..c05156e47 --- /dev/null +++ b/modules/storage-health-check-geo-repl.adoc @@ -0,0 +1,86 @@ +:_content-type: PROCEDURE +[id="storage-health-check-geo-repl"] += Geo-replication storage health check issues + +There is a known issue when running a three-site geo-replication {productname} environment. When one of the three sites goes down due to storage failure, restarting the `Quay` pods in the remaining two sites causes {productname} to shut down. + +When checking the status of your geo-replication environment, the `GET /health/endtoend` health check endpoint does not check distributed storage engines. It only checks the preferred storage engine. + +This is the expected behavior, however here are two workarounds for this issue. + +[id="adding-overrides-to-quayregistry-crd"] +== Workaround 1: Adding overrides to the QuayRegistry CRD + +Use the following procedure to add overrides to the `QuayRegistry` CRD. Overriding the `QuayRegistry` custom resource definition (CRD) disables the initial validation. + +.Procedure + +[IMPORTANT] +==== +The overrides field is potentially destructive and should be removed from your `QuayRegistry` CRD as soon as possible. +==== + +* Update your `QuayRegistry` CRD to include the following information: ++ +[source,yaml] +---- +spec: +- kind: quay + managed: true + overrides: + env: + - name: IGNORE_VALIDATION + value: "true" <1> +---- +<1> `value` is a boolean and must be in quotation marks. 
This forces {productname} to restart. This restart also runs the config tool as the first process which does a health check on the configuration and ensures that all components that {productname} hooks to are available. + +[id="remove-offending-storage-engine"] +== Workaround 2: Removing the offending storage engine + +Another workaround is to remove the storage engine that is failing. To successfully remove a certain storage engine, you must remove the storage name, driver and all related parameters to that driver from the {productname} `config.yaml` file. Also remove the storage driver name from the `DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS` and `DISTRIBUTED_STORAGE_PREFERENCE` fields. For example: + +[source,yaml] +---- +... +DISTRIBUTED_STORAGE_CONFIG: + default: # storage name + - RadosGWStorage # storage driver + - access_key: minioadmin # driver parameters + bucket_name: quay + hostname: 10.0.0.1 + is_secure: false + port: "9000" + secret_key: minioadmin + storage_path: /datastorage/registry + swift: # storage name + - SwiftStorage # storage driver + - auth_url: http://10.0.50.50/identity # driver parameters + auth_version: "3" + os_options: + tenant_id: + user_domain_name: +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - default + - swift +DISTRIBUTED_STORAGE_PREFERENCE: + - default + - swift +... +---- + +Removing a faulty storage engine includes the following conditions: + +* This change must be done on all {productname} instances that you are running. The `Quay` pods should come online afterwards. +* Image that are completely stored in the failed data center will not be pullable. +* Geo-replication is an asynchronous operation, it happens in batches and after the image has been completely pushed to the registry. There is no guarantee that all blobs for all images pushed to the failed data center were transferred to other storage locations in time. If such an image is encountered, it should be re-pushed to {productname} again. 
+* After the failed storage engine has been restored, the configuration for that storage engine should be restored to the remaining two {productname} instances and {productname} should be restarted. One needs to enqueue blobs that are now in the remaining two data centers to be pushed to the failed data center. This can be done with the following script: ++ +[source,terminal] +---- +$ oc exec -it quay-pod-name -- python -m util.backfillreplication +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/7010204[Geo-replication storage health-check issue] \ No newline at end of file diff --git a/modules/storage-troubleshooting-issues.adoc b/modules/storage-troubleshooting-issues.adoc new file mode 100644 index 000000000..6e838bdb1 --- /dev/null +++ b/modules/storage-troubleshooting-issues.adoc @@ -0,0 +1,31 @@ +:_content-type: PROCEDURE +[id="storage-troubleshooting-issues"] += Troubleshooting {productname} object storage issues + +Use the following options to troubleshoot {productname} object storage issues. + +.Procedure + +* Enter the following command to see what object storage is used: ++ +[source,terminal] +---- +$ oc get quayregistry quay-registry-name -o yaml +---- + +* Ensure that the object storage you are using is officially supported by {productname} by checking the link:https://access.redhat.com/articles/4067991[tested integrations] page. + +* Enable debug mode. For more information, see "Running {productname} in debug mode". + +* Check your object storage configuration in your `config.yaml` file. Ensure that it is accurate and matches the settings provided by your object storage provider. You can check information like access credentials, endpoint URLs, bucket and container names, and other relevant configuration parameters. + +* Ensure that {productname} has network connectivity to the object storage endpoint.
Check the network configurations to ensure that there are no restrictions blocking the communication between {productname} and the object storage endpoint. + +* If `FEATURE_STORAGE_PROXY` is enabled in your `config.yaml` file, check to see if its download URL is accessible. This can be found in the {productname} debug logs. For example: ++ +[source,terminal] +---- +$ curl -vvv "https://QUAY_HOSTNAME/_storage_proxy/dhaWZKRjlyO......Kuhc=/https/quay.hostname.com/quay-test/datastorage/registry/sha256/0e/0e1d17a1687fa270ba4f52a85c0f0e7958e13d3ded5123c3851a8031a9e55681?AWSAccessKeyId=xxxx&Signature=xxxxxx4%3D&Expires=1676066703" +---- + +* Try access the object storage service outside of {productname} to determine if the issue is specific to your deployment, or the underlying object storage. You can use command line tools like `aws`, `gsutil`, or `s3cmd` provided by the object storage provider to perform basic operations like listing buckets, containers, or uploading and downloading objects. This might help you isolate the problem. diff --git a/modules/storage-troubleshooting.adoc b/modules/storage-troubleshooting.adoc new file mode 100644 index 000000000..108401786 --- /dev/null +++ b/modules/storage-troubleshooting.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="storage-troubleshooting"] += Troubleshooting {productname} object storage + +Object storage is a type of data storage architecture that manages data as discrete units called `objects`. Unlike traditional file systems that organize data into hierarchical directories and files, object storage treats data as independent entities with unique identifiers. Each object contains the data itself, along with metadata that describes the object and enables efficient retrieval. + +{productname} uses object storage as the underlying storage mechanism for storing and managing container images. It stores container images as individual objects. 
Each container image is treated as an object, with its own unique identifier and associated metadata. \ No newline at end of file diff --git a/modules/superuser-manage-api.adoc b/modules/superuser-manage-api.adoc new file mode 100644 index 000000000..ab3e93c51 --- /dev/null +++ b/modules/superuser-manage-api.adoc @@ -0,0 +1,4 @@ +[id="superuser-manage-api"] += Managing your deployment as a superuser with the {productname} API + +Through the {productname} UI, superusers have the ability to create, list, change, and delete aspects of the registry, such as users, service keys, a user's quota, and more. \ No newline at end of file diff --git a/modules/team-permissions-api.adoc b/modules/team-permissions-api.adoc new file mode 100644 index 000000000..d73a5fa17 --- /dev/null +++ b/modules/team-permissions-api.adoc @@ -0,0 +1,70 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="repo-manage-team-permissions"] += Managing team permissions by using the {productname} API + +Use the following procedure to manage team permissions by using the {productname} API. + +. Permissions for a specified team can be returned by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getteampermissions[`GET /api/v1/repository/{repository}/permissions/team/{teamname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "write"} +---- + +. Permissions for all teams can be returned with the link:[`GET /api/v1/repository/{repository}/permissions/team/`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": {"ironmanteam": {"role": "read", "name": "ironmanteam", "avatar": {"name": "ironmanteam", "hash": "8045b2361613622183e87f33a7bfc54e100a41bca41094abb64320df29ef458d", "color": "#969696", "kind": "team"}}, "sillyteam": {"role": "read", "name": "sillyteam", "avatar": {"name": "sillyteam", "hash": "f275d39bdee2766d2404e2c6dbff28fe290969242e9fcf1ffb2cde36b83448ff", "color": "#17becf", "kind": "team"}}}} +---- + +. Permissions for a specified team can be changed by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeteampermissions[`PUT /api/v1/repository/{repository}/permissions/team/{teamname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": ""}' \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "admin", "name": "superteam", "avatar": {"name": "superteam", "hash": "48cb6d114200039fed5c601480653ae7371d5a8849521d4c3bf2418ea013fc0f", "color": "#9467bd", "kind": "team"}} +---- + +. Team permissions can be deleted with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteteampermissions[`DELETE /api/v1/repository/{repository}/permissions/team/{teamname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/teams-overview.adoc b/modules/teams-overview.adoc new file mode 100644 index 000000000..48aa0ebe5 --- /dev/null +++ b/modules/teams-overview.adoc @@ -0,0 +1,11 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="teams-overview"] += {productname} teams overview + +In {productname} a _team_ is a group of users with shared permissions, allowing for efficient management and collaboration on projects. Teams can help streamline access control and project management within organizations and repositories. They can be assigned designated permissions and help ensure that members have the appropriate level of access to their repositories based on their roles and responsibilities. diff --git a/modules/tenancy-model.adoc b/modules/tenancy-model.adoc index ef699dd23..74ed0a2e8 100644 --- a/modules/tenancy-model.adoc +++ b/modules/tenancy-model.adoc @@ -1,12 +1,22 @@ -[[tenancy-model]] -= {productname} tenancy model +// module included in the following assemblies: -image:178_Quay_architecture_0821_tenancy_model.png[Quay tenancy model] +// * use_quay/master.adoc +// * quay_io/master.adoc -* **Organizations** provide a way of sharing repositories under a common namespace which does not belong to a single user, but rather to many users in a shared setting (such as a company). -* **Teams** provide a way for an organization to delegate permissions (both global and on specific repositories) to sets or groups of users. -* **Users** can log in to a registry through the {productname} web UI or a client (such as `podman login`). Each user automatically gets a user namespace, for example, `quay-server.example.com/user/`. -* **Super users** have enhanced access and privileges via the Super User Admin Panel in the user interface and through Super User API calls that are not visible or accessible to normal users. 
-* **Robot accounts** provide automated access to repositories for non-human users such as pipeline tools and are similar in nature to OpenShift service accounts. Permissions can be granted to a robot account in a repository by adding that account like any other user or team. +:_content-type: CONCEPT +[id="tenancy-model"] += Tenancy model +image:178_Quay_architecture_0821_tenancy_model.png[Tenancy model] +* **Organizations** provide a way of sharing repositories under a common namespace that does not belong to a single user. Instead, these repositories belong to several users in a shared setting, such as a company. + +* **Teams** provide a way for an Organization to delegate permissions. Permissions can be set at the global level (for example, across all repositories), or on specific repositories. They can also be set for specific sets, or groups, of users. + +* **Users** can log in to a registry through the web UI or a by using a client like Podman and using their respective login commands, for example, `$ podman login`. Each user automatically gets a user namespace, for example, `//`, or `quay.io/` if you are using {quayio}. + +ifeval::["{context}" == "use-quay"] +* **Superusers** have enhanced access and privileges through the *Super User Admin Panel* in the user interface. Superuser API calls are also available, which are not visible or accessible to normal users. +endif::[] + +* **Robot accounts** provide automated access to repositories for non-human users like pipeline tools. Robot accounts are similar to {ocp} *Service Accounts*. Permissions can be granted to a robot account in a repository by adding that account like you would another user or team. \ No newline at end of file diff --git a/modules/testing-3-800.adoc b/modules/testing-3-800.adoc index 7153a3b69..dc04cb487 100644 --- a/modules/testing-3-800.adoc +++ b/modules/testing-3-800.adoc @@ -473,7 +473,7 @@ $ curl -X DELETE -H "Authorization: Bearer " http://quay-server. 
+ [NOTE] ==== -For more information about obtaining OAuth tokens, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/red_hat_quay_api_guide/using_the_red_hat_quay_api#create_oauth_access_token[Create OAuth access token]. +For more information about obtaining OAuth tokens, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/red_hat_quay_api_guide/using_the_red_hat_quay_api#create_oauth_access_token[Create OAuth access token]. ==== [[enabling-feature-restricted-users]] @@ -900,7 +900,7 @@ $ podman pull quay-server.example.com/proxytest/projectquay/clair:4.1.5 * Click *Tags* in the navigation pane and ensure that `clair:4.2.3` and `clair:4.1.5` are tagged. -. Pull the last image that will result in your repository exceeding the the allotted quota, for example: +. Pull the last image that will result in your repository exceeding the allotted quota, for example: + ---- $ podman pull quay-server.example.com/proxytest/projectquay/clair:4.1.4 diff --git a/modules/testing-oci-support.adoc b/modules/testing-oci-support.adoc new file mode 100644 index 000000000..20f35563d --- /dev/null +++ b/modules/testing-oci-support.adoc @@ -0,0 +1,177 @@ +:_content-type: CONCEPT +[id="attaching-referrers-image-tag"] += Attaching referrers to an image tag + +The following procedure shows you how to attach referrers to an image tag using different schemas supported by the OCI distribution spec 1.1 using the `oras` CLI. This is useful for attaching and managing additional metadata like referrers to container images. + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. +* You have access to an OCI media artifact. + +.Procedure + +. Tag an OCI media artifact by entering the following command: ++ +[source,terminal] +---- +$ podman tag ///: +---- + +. Push the artifact to your {productname} registry. 
For example: ++ +[source,terminal] +---- +$ podman push ///: +---- + +. Enter the following command to attach a manifest using the OCI 1.1 referrers `API` schema with `oras`: ++ +[source,terminal] +---- +$ oras attach --artifact-type --distribution-spec v1.1-referrers-api \ +///: \ +.txt +---- ++ +.Example output ++ +[source,terminal] +---- +-spec v1.1-referrers-api quay.io/testorg3/myartifact-image:v1.0 hi.txt +✓ Exists hi.txt 3/3 B 100.00% 0s + └─ sha256:98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 +✓ Exists application/vnd.oci.empty.v1+json 2/2 B 100.00% 0s + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 723/723 B 100.00% 677ms + └─ sha256:31c38e6adcc59a3cfbd2ef971792aaf124cbde8118e25133e9f9c9c4cd1d00c6 +Attached to [registry] quay.io/testorg3/myartifact-image@sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da +Digest: sha256:31c38e6adcc59a3cfbd2ef971792aaf124cbde8118e25133e9f9c9c4cd1d00c6 +---- + +. Enter the following command to attach a manifest using the OCI 1.1 referrers `tag` schema: ++ +[source,terminal] +---- +$ oras attach --artifact-type --distribution-spec v1.1-referrers-tag \ + ///: \ +.txt +---- ++ +.Example output ++ +[source,terminal] +---- +✓ Exists hi.txt 3/3 B 100.00% 0s + └─ sha256:98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 +✓ Exists application/vnd.oci.empty.v1+json 2/2 B 100.00% 0s + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 723/723 B 100.00% 465ms + └─ sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 +Attached to [registry] quay.io/testorg3/myartifact-image@sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da +Digest: sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 +---- + +. 
Enter the following command to discover referrers of the artifact using the `tag` schema: ++ +[source,terminal] +---- +$ oras discover --insecure --distribution-spec v1.1-referrers-tag \ +///: +---- ++ +.Example output ++ +[source,terminal] +---- +quay.io/testorg3/myartifact-image@sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da +└── doc/example + └── sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 +---- + +. Enter the following command to discover referrers of the artifact using the `API` schema: ++ +[source,terminal] +---- +$ oras discover --distribution-spec v1.1-referrers-api \ +///: +---- ++ +.Example output ++ +[source,terminal] +---- +Discovered 3 artifacts referencing v1.0 +Digest: sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da + +Artifact Type Digest + sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 + sha256:22b7e167793808f83db66f7d35fbe0088b34560f34f8ead36019a4cc48fd346b + sha256:bb2b7e7c3a58fd9ba60349473b3a746f9fe78995a88cb329fc2fd1fd892ea4e4 +---- + +. Optional. You can also discover referrers by using the `/v2///referrers/` endpoint. For this to work, you must generate a v2 API token and set `FEATURE_REFERRERS_API: true` in your `config.yaml` file. + +.. Update your `config.yaml` file to include the `FEATURE_REFERRERS_API` field. For example: ++ +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: true +# ... +---- + +.. Enter the following command to Base64 encode your credentials: ++ +[source,terminal] +---- +$ echo -n ':' | base64 +---- ++ +.Example output ++ +[source,terminal] +---- +abcdeWFkbWluOjE5ODlraWROZXQxIQ== +---- + +..
Enter the following command to use the base64 encoded token and modify the URL endpoint to your {productname} server: ++ +[source,terminal] +---- +$ curl --location '/v2/auth?service=&scope=repository:quay/listocireferrs:pull,push' --header 'Authorization: Basic ' -k | jq +---- ++ +.Example output ++ +[source,terminal] +---- +{ + "token": "..." +} +---- + +. Enter the following command, using the v2 API token, to list OCI referrers of a manifest under a repository: ++ +[source,terminal] +---- +$ GET https:///v2///referrers/sha256:0de63ba2d98ab328218a1b6373def69ec0d0e7535866f50589111285f2bf3fb8 +--header 'Authorization: Bearer -k | jq +---- ++ +.Example output ++ +[source,terminal] +---- +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383", + "size": 793 + }, + ] +} +---- diff --git a/modules/testing-ssl-tls-configuration.adoc b/modules/testing-ssl-tls-configuration.adoc new file mode 100644 index 000000000..520db12f7 --- /dev/null +++ b/modules/testing-ssl-tls-configuration.adoc @@ -0,0 +1,5 @@ +:_content-type: PROCEDURE +[id="testing-ssl-tls-configuration"] += Testing the SSL/TLS configuration + +Your SSL/TLS configuration can be tested by using the command-line interface (CLI). Use the following procedure to test your SSL/TLS configuration. \ No newline at end of file diff --git a/modules/token-overview.adoc b/modules/token-overview.adoc new file mode 100644 index 000000000..d9a12a22c --- /dev/null +++ b/modules/token-overview.adoc @@ -0,0 +1,15 @@ +:_content-type: CONCEPT +[id="token-overview"] += Introduction to {productname} OAuth 2.0 tokens + +The {productname} OAuth 2 token system provides a secure, standards-based method for accessing {productname}'s API and other relevant resources. 
The OAuth 2 token-based approach provides a secure method for handling authentication and authorization for complex environments. Compared to more traditional API tokens, {productname}'s OAuth 2 token system offers the following enhancements: + +* Standards-based security, which adheres to the link:https://oauth.net/2/[OAuth 2.0 protocol]. +* Revocable access by way of deleting the application in which the OAuth 2 token exists. +* Fine-grained access control, which allows {productname} administrators the ability to assign specific permissions to tokens. +* Delegated access, which allows third-party applications and services to act on behalf of a user. +* Future-proofing, which helps ensure that {productname} remains compatible with other services, platforms, and integrations. + +{productname} primarily supports two types of tokens: OAuth 2 access tokens and robot account tokens. A third token type, an _OCI referrers access token_, that is required to list OCI referrers of a manifest under a repository, is also available when warranted. + +The following chapters provide more details about each token type and how to generate each token type. \ No newline at end of file diff --git a/modules/troubleshooting-401-helm.adoc b/modules/troubleshooting-401-helm.adoc new file mode 100644 index 000000000..f4798fca1 --- /dev/null +++ b/modules/troubleshooting-401-helm.adoc @@ -0,0 +1,12 @@ +:_content-type: CONCEPT +[id="troubleshooting-401-helm"] += Troubleshooting Helm chart pushes on {productname} + +In some cases, pushing a Helm chart to your {productname} registry might return the following error: `Error: unexpected status: 401 UNAUTHORIZED`. This error primarily occurs when using robot accounts, and because Helm interprets the repository by assuming that the repository used to push is the name of the Helm chart. For example, if you create a chart named `etherpad` and then push to a repository named `etherpad`, it works. 
However, pushing to a different repository, for example, `samplerepo`, when using a robot account does not work because Helm interprets it as pushing to `samplerepo/etherpad`. This error occurs because robot accounts usually do not have permissions to create a repository. + +As a workaround for this issue, use a robot account that is added to a team that has creator privileges. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6973126[Unable to push helm chart to Quay registry prompts unexpected status: 401 UNAUTHORIZED]. \ No newline at end of file diff --git a/modules/troubleshooting-builds.adoc b/modules/troubleshooting-builds.adoc new file mode 100644 index 000000000..9d5e4b54c --- /dev/null +++ b/modules/troubleshooting-builds.adoc @@ -0,0 +1,93 @@ + +:_content-type: PROCEDURE +[id="troubleshooting-builds"] += Troubleshooting Builds + +The _builder_ instances started by the _build manager_ are ephemeral. This means that they will either get shut down by {productname} on timeouts or failure, or garbage collected by the control plane (EC2/K8s). In order to obtain the _builds_ logs, you must do so while the _builds_ are running. + +[id="debug-config-flag"] +== DEBUG config flag + +The `DEBUG` flag can be set to `true` in order to prevent the _builder_ instances from getting cleaned up after completion or failure. For example: + +[source,yaml] +---- + EXECUTORS: + - EXECUTOR: ec2 + DEBUG: true + ... + - EXECUTOR: kubernetes + DEBUG: true + ... +---- + +When set to `true`, the debug feature prevents the _build nodes_ from shutting down after the `quay-builder` service is done or fails. It also prevents the _build manager_ from cleaning up the instances by terminating EC2 instances or deleting Kubernetes jobs. This allows debugging _builder node_ issues. + +Debugging should not be set in a production cycle. 
The lifetime service still exists; for example, the instance still shuts down after approximately two hours. When this happens, EC2 instances are terminated and Kubernetes jobs are completed. + +Enabling debug also affects the `ALLOWED_WORKER_COUNT` because the unterminated instances and jobs still count toward the total number of running workers. As a result, the existing _builder workers_ must be manually deleted if `ALLOWED_WORKER_COUNT` is reached to be able to schedule new _builds_. + +ifdef::upstream[] +[id="troubleshooting-amazon-ec2"] +== Troubleshooting Amazon EC2 + +Use the following procedure to troubleshoot Amazon EC2 Builds. + +.Procedure + +. Start a Build in {productname}. + +. In the EC2 console, identify the Build instance. Build instances are named `Quay Ephemeral Builder` and have the tag {`: `} + +. Using the SSH key set by the `EC2_KEY_NAME` configuration field, log in to the Builder instance by running the following command: ++ +[source,terminal] +---- +$ ssh -i /path/to/ssh/key/in/ec2/or/config/id_rsa core@ +---- + +. Obtain the `quay-builder` service logs by entering the following commands: ++ +[source,terminal] +---- +$ systemctl status quay-builder +---- ++ +[source,terminal] +---- +$ journalctl -f -u quay-builder +---- +endif::upstream[] + +[id="openshift-kubernetes-troubleshooting"] +== Troubleshooting {ocp} and Kubernetes Builds + +Use the following procedure to troubleshoot {ocp} and Kubernetes Builds. + +.Procedure + +. Create a port forwarding tunnel between your local machine and a pod running with either an {ocp} cluster or a Kubernetes cluster by entering the following command: ++ +[source,terminal] +---- +$ oc port-forward 9999:2222 +---- + +. Establish an SSH connection to the remote host using a specified SSH key and port, for example: ++ +[source,terminal] +---- +$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost +---- + +. 
Obtain the `quay-builder` service logs by entering the following commands: ++ +[source,terminal] +---- +$ systemctl status quay-builder +---- ++ +[source,terminal] +---- +$ journalctl -f -u quay-builder +---- diff --git a/modules/troubleshooting-components.adoc b/modules/troubleshooting-components.adoc new file mode 100644 index 000000000..0b1ddb7e0 --- /dev/null +++ b/modules/troubleshooting-components.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="troubleshooting-components"] += Troubleshooting {productname} components + +This document focuses on troubleshooting specific components within {productname}, providing targeted guidance for resolving issues that might arise. Designed for system administrators, operators, and developers, this resource aims to help diagnose and troubleshoot problems related to individual components of {productname}. + +In addition to the following procedures, {productname} components can also be troubleshot by running {productname} in debug mode, obtaining log information, obtaining configuration information, and performing health checks on endpoints. + +By using the following procedures, you are able to troubleshoot common component issues. Afterwards, you can search for solutions on the link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase], or file a support ticket with the Red Hat Support team. \ No newline at end of file diff --git a/modules/troubleshooting-forgotten-passwords.adoc b/modules/troubleshooting-forgotten-passwords.adoc new file mode 100644 index 000000000..81f9adba9 --- /dev/null +++ b/modules/troubleshooting-forgotten-passwords.adoc @@ -0,0 +1,110 @@ +:_content-type: CONCEPT +[id="troubleshooting-forgotten-passwords"] += Resetting superuser passwords on {productname} standalone deployments + +Use the following procedure to reset a superuser's password. + +.Prerequisites + +* You have created a {productname} superuser. +* You have installed Python 3.9. 
+* You have installed the `pip` package manager for Python. +* You have installed the `bcrypt` package for `pip`. + +.Procedure + +. Generate a secure, hashed password using the `bcrypt` package in Python 3.9 by entering the following command: ++ +[source,terminal] +---- +$ python3.9 -c 'import bcrypt; print(bcrypt.hashpw(b"newpass1234", bcrypt.gensalt(12)).decode("utf-8"))' +---- ++ +.Example output ++ +[source,terminal] +---- +$2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm +---- + +. Enter the following command to show the container ID of your {productname} container registry: ++ +[source,terminal] +---- +$ sudo podman ps -a +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +70560beda7aa registry.redhat.io/rhel8/redis-5:1 run-redis 2 hours ago Up 2 hours ago 0.0.0.0:6379->6379/tcp redis +8012f4491d10 registry.redhat.io/quay/quay-rhel8:v3.8.2 registry 3 minutes ago Up 8 seconds ago 0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp quay +8b35b493ac05 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 39 seconds ago Up 39 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +---- + +. Execute an interactive shell for the `postgresql` container image by entering the following command: ++ +[source,terminal] +---- +$ sudo podman exec -it 8b35b493ac05 /bin/bash +---- + +. Re-enter the `quay` PostgreSQL database server, specifying the database, username, and host address: ++ +[source,terminal] +---- +bash-4.4$ psql -d quay -U quayuser -h 192.168.1.28 -W +---- + +. Update the `password_hash` of the superuser admin who lost their password: ++ +[source,terminal] +---- +quay=> UPDATE public.user SET password_hash = '$2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm' where username = 'quayadmin'; +---- ++ +.Example output ++ +[source,terminal] +---- +UPDATE 1 +---- + +. 
Enter the following to command to ensure that the `password_hash` has been updated: ++ +[source,terminal] +---- +quay=> select * from public.user; +---- ++ +.Example output ++ +[source,terminal] +---- +id | uuid | username | password_hash | email | verified | stripe_id | organization | robot | invoice_email | invalid_login_attempts | last_invalid_login |removed_tag_expiration_s | enabled | invoice_email_address | company | family_name | given_name | location | maximum_queued_builds_count | creation_date | last_accessed +----+--------------------------------------+-----------+--------------------------------------------------------------+-----------------------+--- +-------+-----------+--------------+-------+---------------+------------------------+----------------------------+--------------------------+------ +---+-----------------------+---------+-------------+------------+----------+-----------------------------+----------------------------+----------- +1 | 73f04ef6-19ba-41d3-b14d-f2f1eed94a4a | quayadmin | $2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm | quayadmin@example.com | t | | f | f | f | 0 | 2023-02-23 07:54:39.116485 | 1209600 | t | | | | | | | 2023-02-23 07:54:39.116492 +---- + +. Log in to your {productname} deployment using the new password: ++ +[source,terminal] +---- +$ sudo podman login -u quayadmin -p newpass1234 http://quay-server.example.com --tls-verify=false +---- ++ +.Example output ++ +[source,terminal] +---- +Login Succeeded! +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6964805[Resetting Superuser Password for Quay]. 
diff --git a/modules/troubleshooting-general.adoc b/modules/troubleshooting-general.adoc new file mode 100644 index 000000000..48418210b --- /dev/null +++ b/modules/troubleshooting-general.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="troubleshooting-general"] += General troubleshooting for {productname} + +The following sections detail general troubleshooting errors for {productname}. \ No newline at end of file diff --git a/modules/troubleshooting-how-tos.adoc b/modules/troubleshooting-how-tos.adoc new file mode 100644 index 000000000..4628c4206 --- /dev/null +++ b/modules/troubleshooting-how-tos.adoc @@ -0,0 +1,6 @@ +:_content-type: CONCEPT +[id="troubleshooting-how-tos"] += How To guide + +This "How to" guide provides step-by-step instructions for troubleshooting common issues encountered while using {productname}. Whether you're a system administrator, developer, or user, this guide helps identify and resolve problems effectively. + diff --git a/modules/troubleshooting-slow-pushes.adoc b/modules/troubleshooting-slow-pushes.adoc new file mode 100644 index 000000000..5b13b1219 --- /dev/null +++ b/modules/troubleshooting-slow-pushes.adoc @@ -0,0 +1,297 @@ +:_content-type: CONCEPT +[id="troubleshooting-slow-pushes"] += Troubleshooting slow image pushes and pulls on the {productname} Operator + +In some cases, your {productname} deployment on {ocp} might experience slow pushes and pulls. The {productname} Operator is only able to serve or accept container image data as fast as the underlying storage allows. There are various causes that might dictate pull speed on a {productname} registry, including: + +* Intermittent networking issues to {productname}'s s3 storage, as pulls directly depend on it. +* Slow backend storage. +* Various problems on {ocp} nodes. +* DNS issues in the cluster. +* Layers or blobs of an image are large, even if the image size is not. +* Using a VPN. 
+* High network bandwidth loads that lead to hogging network resources on certain {ocp} pods. + +To explore the root cause of some of these issues, you can run {productname} in debug mode. For more information, see Running {productname} in debug mode. + +Other troubleshooting procedures for slow pushes and pulls on the {productname} Operator can be found in subsequent sections. + +[id="understanding-quay-setup"] +== Understanding your {productname} setup + +In some cases, understanding your {productname} setup and the load on the cluster, and determining whether it is enough, can help diagnose slow pushes and pulls. + +[id="comparing-containizeration-platforms"] +== Comparing containerization platforms + +In some cases, pushes and pulls done with the Podman CLI might be slow. Use the following procedure to compare Podman pushes and pulls to Docker. + +.Procedure + +. Enter the following command to check how long it takes for the Podman client to pull images: ++ +[source,terminal] +---- +$ time podman pull //: +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 900e6061671b done +Copying config 8135583d97 done +Writing manifest to image destination +Storing signatures +8135583d97feb82398909c9c97607159e6db2c4ca2c885c0b8f590ee0f9fe90d +0.57user 0.11system 0:00.99elapsed 68%CPU (0avgtext+0avgdata 78716maxresident)k +800inputs+15424outputs (18major+6528minor)pagefaults 0swaps +---- + +. Compare the Podman time with another client's time, like Docker. 
For example: ++ +[source,terminal] +---- +$ time docker pull //: +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob sha256: [--------------------------------------] 0.0b/4.2MB +Copying config sha256: [--------------------------------------] 0.0b/1.5KB +Writing manifest to image destination +Storing signatures + +real 0m15.346s +user 0m0.056s +sys 0m0.020s +---- + +[id="checking-health-quay-pods"] +== Checking the health of your deployment using the API + +In some cases, the health of your `Quay` pods might be compromised. Use the following procedure to check the health of your `Quay` pods. + +.Procedure + +. The following commands run a health check on the `Quay` pods: ++ +.. If you are using custom certificates for {productname}, you can enter the following commands: ++ +[source,terminal] +---- +$ curl -k /health/instance +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl -k /health/endtoend +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"redis":true,"storage":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl -k /health/warning +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"disk_space_warning":true}},"status_code":200} +---- + +.. 
If you are not using custom certificates, enter the following commands: ++ +[source,terminal] +---- +$ curl /health/instance +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl /health/endtoend +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"redis":true,"storage":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl -k /health/warning +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"disk_space_warning":true}},"status_code":200} +---- + +. If the status of your `Quay` pod is reported as `unhealthy`, consult with your storage provider to ensure that it is supported for use with {productname}. Otherwise, you can check the link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Test Integrations] document. + + +[id="checking-network-connection"] +== Checking the network connection between {productname} and the storage location + +In some cases, the network connection between {productname} and its storage location might be erroneous. + +Use the following procedure to check the network connection between {productname} and the storage location. + +.Procedure + +* From a system that has access to {productname} and to the storage provider, enter the following command: ++ +[source,terminal] +---- +$ ping +---- ++ +.Example output ++ +[source,terminal] +---- +Destination Host Unreachable +---- ++ +If an error is returned, there are network connectivity issues or the storage provider is currently unavailable. + +[id="checking-size-image"] +== Checking the size of the image + +In some cases, overall time when pulling an image can be determined by its size. Use the following procedure to check the size of an image. + +.Procedure + +. 
Enter the following command to obtain the image manifest: ++ +[source,terminal] +---- +$ curl -X GET https:///v2//manifests/ -H "Accept: application/vnd.docker.distribution.manifest.v2+json" +---- + +. Enter the following command to extract the image size from the response: ++ +[source,terminal] +---- +$ curl -X GET https:///v2//manifests/ -H "Accept: application/vnd.docker.distribution.manifest.v2+json" | jq '.config.size' +---- ++ +[NOTE] +==== +The response is in a JSON document. Locate the `config` field. Within that field, you will find a `size` property. The value of `size` represents the size of the image in bytes. +==== + +[id="checking-throughput-vms"] +== Checking the throughput of your virtual machines to your storage bucket + +Use the following procedure to check the throughput of your virtual machine to your storage provider. The execution time revealed in the following procedure might help you optimize performance, reveal why pushes and pulls are slow, or compare different configurations or setups. + +.Prerequisites + +* You have installed the AWS CLI (`aws`). + +.Procedure + +. Enter the following command to create a sample file of 500 MB, that is filled with random data, in the `/tmp` directory: ++ +[source,terminal] +---- +$ dd if=/dev/urandom of=/tmp/random-file count=10 bs=50M iflag=fullblock +---- + +. Enter the following command to set the value of your AWS access key: ++ +[source,terminal] +---- +$ export AWS_ACCESS_KEY_ID= +---- + +. Enter the following command to set the value of your AWS secret access key: ++ +[source,terminal] +---- +$ export AWS_SECRET_ACCESS_KEY=123456789ABCD +---- + +. Copy the sample file created in Step 1 to your storage bucket, measuring the execution time, by entering the following command: ++ +[source,terminal] +---- +$ time { aws s3 cp --no-verify-ssl --endpoint-url https://.com /tmp/random-file s3://; } +---- + +. 
Remove the sample file by entering the following command: ++ +[source,terminal] +---- +$ rm /tmp/random-file +---- + +. Copy the sample file from your storage bucket to your local directory, measuring the execution time, by entering the following command: ++ +[source,terminal] +---- +$ time { aws s3 cp --no-verify-ssl --endpoint-url https://.com s3:///random-file /tmp; } +---- ++ +Use this information to reveal insights into the performance of the virtual machine and storage provider that you are using. + +[id="obtaining-regional-information"] +== Obtaining regional information + +If your {productname} machine is located in a different region than your s3 bucket, pushes and pulls might be slower than expected. + +[id="configuring-firewall"] +== Configuring firewalls + +If your machine has any proxies or firewalls between the client pulling images and the {productname} registry, additional latency might be introduced, or bandwidth could be restricted. You can try disabling your firewall to improve push and pull speeds. + +[NOTE] +==== +If you are not using the `FEATURE_PROXY_STORAGE` feature, {productname} provides a direct download link to the client through Podman, Skopeo, or Docker. At this point, traffic does not go through {productname}. Instead, the client pings the underlying storage and requests the image layer. +==== + +[id="checking-antivirus-software"] +== Checking your antivirus software + +In some cases, antivirus software can interact with an image when it is pulled. This can increase the time that it takes to pull an image. Ensure that your antivirus software does not interfere with images when they are being pulled. + +[id="checking-resource-allocation"] +== Checking resource allocation + +In some cases, an under-provisioned machine can result in slow performance. Check the resource allocation for the machine that is hosting the `Quay` pod or container. 
Ensure that it has sufficient CPU, memory, and network resources allocated to handle the expected workload. + + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/7023728[Image pull is slow in Quay]. diff --git a/modules/understanding-action-logs.adoc b/modules/understanding-action-logs.adoc new file mode 100644 index 000000000..e89f87aa6 --- /dev/null +++ b/modules/understanding-action-logs.adoc @@ -0,0 +1,234 @@ +:_content-type: CONCEPT +[id="understanding-action-logs"] += Understanding usage logs + +By default, usage logs are stored in the {productname} database. They are exposed through the web UI, on the organization and repository levels, and in the *Superuser Admin Panel*. + +Database logs capture a wide range of events in {productname}, such as the changing of account plans, user actions, and general operations. Log entries include information such as the action performed (`kind_id`), the user who performed the action (`account_id` or `performer_id`), the timestamp (`datetime`), and other relevant data associated with the action (`metadata_json`). + +[id="viewing-database-logs"] +== Viewing database logs + +The following procedure shows you how to view repository logs that are stored in a PostgreSQL database. + +.Prerequisites + +* You have administrative privileges. +* You have installed the `psql` CLI tool. + +.Procedure + +. Enter the following command to log in to your {productname} PostgreSQL database: ++ +[source,terminal] +---- +$ psql -h -p 5432 -U -d +---- ++ +.Example output ++ +[source,terminal] +---- +psql (16.1, server 13.7) +Type "help" for help. +---- + +. Optional. 
Enter the following command to display the tables list of your PostgreSQL database: ++ +[source,terminal] +---- +quay=> \dt +---- ++ +.Example output ++ +[source,terminal] +---- + List of relations + Schema | Name | Type | Owner +--------+----------------------------+-------+---------- + public | logentry | table | quayuser + public | logentry2 | table | quayuser + public | logentry3 | table | quayuser + public | logentrykind | table | quayuser +... +---- + +. You can enter the following command to return a list of `repository_ids` that are required to return log information: ++ +[source,terminal] +---- +quay=> SELECT id, name FROM repository; +---- ++ +.Example output ++ +[source,terminal] +---- + id | name +----+--------------------- + 3 | new_repository_name + 6 | api-repo + 7 | busybox +... +---- + +. Enter the following command to use the `logentry3` relation to show log information about one of your repositories: ++ +[source,terminal] +---- +SELECT * FROM logentry3 WHERE repository_id = ; +---- ++ +.Example output ++ +[source,terminal] +---- + id | kind_id | account_id | performer_id | repository_id | datetime | ip | metadata_json + + 59 | 14 | 2 | 1 | 6 | 2024-05-13 15:51:01.897189 | 192.168.1.130 | {"repo": "api-repo", "namespace": "test-org"} +---- ++ +In the above example, the following information is returned: ++ +[source,terminal] +---- +{ + "log_data": { + "id": 59 <1> + "kind_id": "14", <2> + "account_id": "2", <3> + "performer_id": "1", <4> + "repository_id": "6", <5> + "ip": "192.168.1.100", <6> + "metadata_json": {"repo": "api-repo", "namespace": "test-org"} <7> + "datetime": "2024-05-13 15:51:01.897189" <8> + } +} +---- +<1> The unique identifier for the log entry. +<2> The action that was done. In this example, it was `14`. The key, or table, in the following section shows you that this `kind_id` is related to the creation of a repository. +<3> The account that performed the action. +<4> The performer of the action. 
+<5> The repository that the action was done on. In this example, `6` correlates to the `api-repo` that was discovered in Step 3. +<6> The IP address where the action was performed. +<7> Metadata information, including the name of the repository and its namespace. +<8> The time when the action was performed. + +[id="log-entry-kind-ids"] +== Log entry kind_ids + +The following table represents the `kind_ids` associated with {productname} actions. + +[cols="1,3,6", options="header"] +|=== +|kind_id |Action |Description + +|1 |account_change_cc |Change of credit card information. +|2 |account_change_password |Change of account password. +|3 |account_change_plan |Change of account plan. +|4 |account_convert |Account conversion. +|5 |add_repo_accesstoken |Adding an access token to a repository. +|6 |add_repo_notification |Adding a notification to a repository. +|7 |add_repo_permission |Adding permissions to a repository. +|8 |add_repo_webhook |Adding a webhook to a repository. +|9 |build_dockerfile |Building a Dockerfile. +|10 |change_repo_permission |Changing permissions of a repository. +|11 |change_repo_visibility |Changing the visibility of a repository. +|12 |create_application |Creating an application. +|13 |create_prototype_permission |Creating permissions for a prototype. +|14 |create_repo |Creating a repository. +|15 |create_robot |Creating a robot (service account or bot). +|16 |create_tag |Creating a tag. +|17 |delete_application |Deleting an application. +|18 |delete_prototype_permission |Deleting permissions for a prototype. +|19 |delete_repo |Deleting a repository. +|20 |delete_repo_accesstoken |Deleting an access token from a repository. +|21 |delete_repo_notification |Deleting a notification from a repository. +|22 |delete_repo_permission |Deleting permissions from a repository. +|23 |delete_repo_trigger |Deleting a repository trigger. +|24 |delete_repo_webhook |Deleting a webhook from a repository. +|25 |delete_robot |Deleting a robot. 
+|26 |delete_tag |Deleting a tag. +|27 |manifest_label_add |Adding a label to a manifest. +|28 |manifest_label_delete |Deleting a label from a manifest. +|29 |modify_prototype_permission |Modifying permissions for a prototype. +|30 |move_tag |Moving a tag. +|31 |org_add_team_member |Adding a member to a team. +|32 |org_create_team |Creating a team within an organization. +|33 |org_delete_team |Deleting a team within an organization. +|34 |org_delete_team_member_invite |Deleting a team member invitation. +|35 |org_invite_team_member |Inviting a member to a team in an organization. +|36 |org_remove_team_member |Removing a member from a team. +|37 |org_set_team_description |Setting the description of a team. +|38 |org_set_team_role |Setting the role of a team. +|39 |org_team_member_invite_accepted |Acceptance of a team member invitation. +|40 |org_team_member_invite_declined |Declining of a team member invitation. +|41 |pull_repo |Pull from a repository. +|42 |push_repo |Push to a repository. +|43 |regenerate_robot_token |Regenerating a robot token. +|44 |repo_verb |Generic repository action (specifics might be defined elsewhere). +|45 |reset_application_client_secret |Resetting the client secret of an application. +|46 |revert_tag |Reverting a tag. +|47 |service_key_approve |Approving a service key. +|48 |service_key_create |Creating a service key. +|49 |service_key_delete |Deleting a service key. +|50 |service_key_extend |Extending a service key. +|51 |service_key_modify |Modifying a service key. +|52 |service_key_rotate |Rotating a service key. +|53 |setup_repo_trigger |Setting up a repository trigger. +|54 |set_repo_description |Setting the description of a repository. +|55 |take_ownership |Taking ownership of a resource. +|56 |update_application |Updating an application. +|57 |change_repo_trust |Changing the trust level of a repository. +|58 |reset_repo_notification |Resetting repository notifications. 
+|59 |change_tag_expiration |Changing the expiration date of a tag. +|60 |create_app_specific_token |Creating an application-specific token. +|61 |revoke_app_specific_token |Revoking an application-specific token. +|62 |toggle_repo_trigger |Toggling a repository trigger on or off. +|63 |repo_mirror_enabled |Enabling repository mirroring. +|64 |repo_mirror_disabled |Disabling repository mirroring. +|65 |repo_mirror_config_changed |Changing the configuration of repository mirroring. +|66 |repo_mirror_sync_started |Starting a repository mirror sync. +|67 |repo_mirror_sync_failed |Repository mirror sync failed. +|68 |repo_mirror_sync_success |Repository mirror sync succeeded. +|69 |repo_mirror_sync_now_requested |Immediate repository mirror sync requested. +|70 |repo_mirror_sync_tag_success |Repository mirror tag sync succeeded. +|71 |repo_mirror_sync_tag_failed |Repository mirror tag sync failed. +|72 |repo_mirror_sync_test_success |Repository mirror sync test succeeded. +|73 |repo_mirror_sync_test_failed |Repository mirror sync test failed. +|74 |repo_mirror_sync_test_started |Repository mirror sync test started. +|75 |change_repo_state |Changing the state of a repository. +|76 |create_proxy_cache_config |Creating proxy cache configuration. +|77 |delete_proxy_cache_config |Deleting proxy cache configuration. +|78 |start_build_trigger |Starting a build trigger. +|79 |cancel_build |Cancelling a build. +|80 |org_create |Creating an organization. +|81 |org_delete |Deleting an organization. +|82 |org_change_email |Changing organization email. +|83 |org_change_invoicing |Changing organization invoicing. +|84 |org_change_tag_expiration |Changing organization tag expiration. +|85 |org_change_name |Changing organization name. +|86 |user_create |Creating a user. +|87 |user_delete |Deleting a user. +|88 |user_disable |Disabling a user. +|89 |user_enable |Enabling a user. +|90 |user_change_email |Changing user email. +|91 |user_change_password |Changing user password. 
+|92 |user_change_name |Changing user name. +|93 |user_change_invoicing |Changing user invoicing. +|94 |user_change_tag_expiration |Changing user tag expiration. +|95 |user_change_metadata |Changing user metadata. +|96 |user_generate_client_key |Generating a client key for a user. +|97 |login_success |Successful login. +|98 |logout_success |Successful logout. +|99 |permanently_delete_tag |Permanently deleting a tag. +|100 |autoprune_tag_delete |Auto-pruning tag deletion. +|101 |create_namespace_autoprune_policy |Creating namespace auto-prune policy. +|102 |update_namespace_autoprune_policy |Updating namespace auto-prune policy. +|103 |delete_namespace_autoprune_policy |Deleting namespace auto-prune policy. +|104 |login_failure |Failed login attempt. +|=== + + + diff --git a/modules/understanding-tag-naming-build-triggers.adoc b/modules/understanding-tag-naming-build-triggers.adoc new file mode 100644 index 000000000..d0de06c1d --- /dev/null +++ b/modules/understanding-tag-naming-build-triggers.adoc @@ -0,0 +1,31 @@ +:_content-type: CONCEPT +[id="understanding-tag-naming-build-triggers"] += Tag naming for build triggers + +Custom tags are available for use in +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +ifeval::["{context}" == "quay-builders-image-automation"] +{productname}. +endif::[] + +One option is to include any string of characters assigned as a tag for each built image. 
Alternatively, you can use the following tag templates on the *Configure Tagging* section of the build trigger to tag images with information from each commit: + +image:custom-tagging.png[Configure Tagging] + +* *${commit}*: Full SHA of the issued commit +* *${parsed_ref.branch}*: Branch information (if available) +* *${parsed_ref.tag}*: Tag information (if available) +* *${parsed_ref.remote}*: The remote name +* *${commit_info.date}*: Date when the commit was issued +* *${commit_info.author.username}*: Username of the author of the commit +* *${commit_info.short_sha}*: First 7 characters of the commit SHA +* *${committer.properties.username}*: Username of the committer + +This list is not complete, but does contain the most useful options for tagging purposes. You can find the complete tag template schema on link:https://github.com/quay/quay/blob/abfde5b9d2cf7d7145e68a00c9274011b4fe0661/buildtrigger/basehandler.py#L96-L195[this page]. + +For more information, see link:https://access.redhat.com/solutions/7033393[Set up custom tag templates in build triggers for {productname} and {quayio}]. \ No newline at end of file diff --git a/modules/unknown-artifacts.adoc b/modules/unknown-artifacts.adoc new file mode 100644 index 000000000..b9a91c83f --- /dev/null +++ b/modules/unknown-artifacts.adoc @@ -0,0 +1,30 @@ +// Document included in the following assemblies: + +// Configuring Red hat Quay + +:_content-type: REFERENCE +[id="unknown-artifacts"] += Unknown media types + +.Unknown media types configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field |Type |Description + +|**IGNORE_UNKNOWN_MEDIATYPES** | Boolean | When enabled, allows a container registry platform to disregard specific restrictions on supported artifact types and accept any unrecognized or unknown media types. 
+ +**Default:** `false` + +|=== + +[id="configuring-unknown-media-types"] +== Configuring unknown media types + +The following YAML is the example configuration when enabling unknown or unrecognized media types. + +.Unknown media types YAML configuration +[source,yaml] +---- +IGNORE_UNKNOWN_MEDIATYPES: true +---- + diff --git a/modules/unmanaging-clair-database.adoc b/modules/unmanaging-clair-database.adoc index 99929ee33..1d2acd13b 100644 --- a/modules/unmanaging-clair-database.adoc +++ b/modules/unmanaging-clair-database.adoc @@ -8,6 +8,11 @@ Use the following procedure to set your Clair database to unmanaged. +[IMPORTANT] +==== +You must not use the same externally managed PostgreSQL database for both {productname} and Clair deployments. Your PostgreSQL database must also not be shared with other workloads, as it might exhaust the natural connection limit on the PostgreSQL side when connection-intensive workloads, like {productname} or Clair, contend for resources. Additionally, pgBouncer is not supported with {productname} or Clair, so it is not an option to resolve this issue. 
+==== + .Procedure * In the Quay Operator, set the `clairpostgres` component of the `QuayRegistry` custom resource to `managed: false`: diff --git a/modules/unsupported-security-scan.adoc b/modules/unsupported-security-scan.adoc new file mode 100644 index 000000000..c79ad5943 --- /dev/null +++ b/modules/unsupported-security-scan.adoc @@ -0,0 +1,17 @@ +:_content-type: PROCEDURE +[id="unsupported-security-scan"] += Image security scan reporting Unsupported + +In some cases, Clair cannot scan images and returns the following error: `{"level":"error","component":"internal/indexer/controller/Controller.Index","manifest":"sha256:e76c212f0288f1f4fe79d219fc6a90514234ef1016babdb7e11946db959d1bac","state":"FetchLayers","error":"failed to fetch layers: encountered error while fetching a layer: fetcher: unexpected status code: 404 Not Found (body starts: \"NoSuchKeyThe specified key does not exist./quay/datastorage/registry/sha256/a3/a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4?AWSAccessKeyId=xxxxxxxxxxxx\")","time":"2022-10-12T06:59:42Z","message":"error during scan"}`. + +This error means that a particular layer is missing from the bucket. Objects in S3 bucket are referenced by keys. If a key is missing, that means that that object the key references is missing and is not found in the bucket. In the above example, the layer with SHA `a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4` is missing from the S3 bucket. + +To resolve this issue, the image to which the SHA IDs belong to must be re-pushed to the registry so that all blobs are re-pushed as well. + +[role="_additional-resources"] +.Additional resources + +For more information, see the following resources: + +* link:https://repost.aws/knowledge-center/404-error-nosuchkey-s3[How can I troubleshoot the 404 "NoSuchKey" error from Amazon S3?] +* link:https://access.redhat.com/solutions/6358352[Quay image SECURITY SCAN show Unsupported]. 
\ No newline at end of file diff --git a/modules/upgrading-geo-repl-quay-operator.adoc b/modules/upgrading-geo-repl-quay-operator.adoc new file mode 100644 index 000000000..1cb57bcf5 --- /dev/null +++ b/modules/upgrading-geo-repl-quay-operator.adoc @@ -0,0 +1,116 @@ +:_content-type: PROCEDURE +[id="upgrading-geo-repl-quay-operator"] += Upgrading a geo-replication deployment of {productname-ocp} + +Use the following procedure to upgrade your geo-replicated {productname-ocp} deployment. + +[IMPORTANT] +==== +* When upgrading a geo-replicated {productname-ocp} deployment to the next y-stream release (for example, {productname} 3.7 -> {productname} 3.8), you must stop operations before upgrading. +* There is intermittent downtime when upgrading from one y-stream release to the next. +* It is highly recommended to back up your {productname-ocp} deployment before upgrading. +==== + +.Procedure + +[NOTE] +==== +This procedure assumes that you are running the {productname} registry on three or more systems. For this procedure, we will assume three systems named `System A`, `System B`, and `System C`. `System A` will serve as the primary system in which the {productname} Operator is deployed. +==== + +. On System B and System C, scale down your {productname} registry. This is done by disabling auto scaling and overriding the replica count for {productname}, mirror workers, and Clair if it is managed.
Use the following `quayregistry.yaml` file as a reference: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: false <1> + - kind: quay + managed: true + overrides: <2> + replicas: 0 + - kind: clair + managed: true + overrides: + replicas: 0 + - kind: mirror + managed: true + overrides: + replicas: 0 + … +---- +<1> Disable auto scaling of `Quay`, `Clair` and `Mirroring` workers +<2> Set the replica count to 0 for components accessing the database and objectstorage ++ +[NOTE] +==== +You must keep the {productname} registry running on System A. Do not update the `quayregistry.yaml` file on System A. +==== + +. Wait for the `registry-quay-app`, `registry-quay-mirror`, and `registry-clair-app` pods to disappear. Enter the following command to check their status: ++ +[source,terminal] +---- +oc get pods -n +---- ++ +.Example output ++ +[source,terminal] +---- +quay-operator.v3.7.1-6f9d859bd-p5ftc 1/1 Running 0 12m +quayregistry-clair-postgres-7487f5bd86-xnxpr 1/1 Running 1 (12m ago) 12m +quayregistry-quay-app-upgrade-xq2v6 0/1 Completed 0 12m +quayregistry-quay-redis-84f888776f-hhgms 1/1 Running 0 12m +---- + +. On System A, initiate a {productname} upgrade to the latest y-stream version. This is a manual process. For more information about upgrading installed Operators, see link:https://docs.openshift.com/container-platform/{ocp-y}/operators/admin/olm-upgrading-operators.html[Upgrading installed Operators]. For more information about {productname} upgrade paths, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-upgrade#upgrading_the_quay_operator[Upgrading the {productname} Operator]. + +. After the new {productname} registry is installed, the necessary upgrades on the cluster are automatically completed. 
Afterwards, new {productname} pods are started with the latest y-stream version. Additionally, new `Quay` pods are scheduled and started. + +. Confirm that the update has properly worked by navigating to the {productname} UI: + +.. In the *OpenShift* console, navigate to *Operators* → *Installed Operators*, and click the *Registry Endpoint* link. ++ +[IMPORTANT] +==== +Do not execute the following step until the {productname} UI is available. Do not upgrade the {productname} registry on System B and on System C until the UI is available on System A. +==== + +. Confirm that the update has properly worked on System A, initiate the {productname} upgrade on System B and on System C. The Operator upgrade results in an upgraded {productname} installation, and the pods are restarted. ++ +[NOTE] +==== +Because the database schema is correct for the new y-stream installation, the new pods on System B and on System C should quickly start. +==== + +. After updating, revert the changes made in step 1 of this procedure by removing `overrides` for the components. For example: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: true <1> + - kind: quay + managed: true + - kind: clair + managed: true + - kind: mirror + managed: true + … +---- +<1> If the `horizontalpodautoscaler` resource was set to `true` before the upgrade procedure, or if you want {productname} to scale in case of a resource shortage, set it to `true`. diff --git a/modules/upgrading-geo-repl-quay.adoc b/modules/upgrading-geo-repl-quay.adoc new file mode 100644 index 000000000..53ecaa2a5 --- /dev/null +++ b/modules/upgrading-geo-repl-quay.adoc @@ -0,0 +1,184 @@ +:_content-type: PROCEDURE +[id="upgrading-geo-repl-quay"] += Upgrading a geo-replication deployment of standalone {productname} + +Use the following procedure to upgrade your geo-replication {productname} deployment. 
+ +[IMPORTANT] +==== +* When upgrading geo-replicated {productname} deployments to the next y-stream release (for example, {productname} 3.7 -> {productname} 3.8), you must stop operations before upgrading. +* There is intermittent downtime when upgrading from one y-stream release to the next. +* It is highly recommended to back up your {productname} deployment before upgrading. +==== + +.Prerequisites + +* You have logged into `registry.redhat.io`. + +.Procedure + +[NOTE] +==== +This procedure assumes that you are running {productname} services on three (or more) systems. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploy_red_hat_quay_-_high_availability/index#preparing_for_red_hat_quay_high_availability[Preparing for {productname} high availability]. +==== + +. Obtain a list of all {productname} instances on each system running a {productname} instance. + +.. Enter the following command on System A to reveal the {productname} instances: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +ec16ece208c0 registry.redhat.io/quay/quay-rhel8:v{producty-n1} registry 6 minutes ago Up 6 minutes ago 0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp quay01 +---- + +.. Enter the following command on System B to reveal the {productname} instances: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +7ae0c9a8b37d registry.redhat.io/quay/quay-rhel8:v{producty-n1} registry 5 minutes ago Up 2 seconds ago 0.0.0.0:82->8080/tcp, 0.0.0.0:445->8443/tcp quay02 +---- + +..
Enter the following command on System C to reveal the {productname} instances: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] ++ +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +e75c4aebfee9 registry.redhat.io/quay/quay-rhel8:v{producty-n1} registry 4 seconds ago Up 4 seconds ago 0.0.0.0:84->8080/tcp, 0.0.0.0:447->8443/tcp quay03 +---- + +. Temporarily shut down all {productname} instances on each system. + +.. Enter the following command on System A to shut down the {productname} instance: ++ +[source,terminal] +---- +$ sudo podman stop ec16ece208c0 +---- + +.. Enter the following command on System B to shut down the {productname} instance: ++ +[source,terminal] +---- +$ sudo podman stop 7ae0c9a8b37d +---- + +.. Enter the following command on System C to shut down the {productname} instance: ++ +[source,terminal] +---- +$ sudo podman stop e75c4aebfee9 +---- + +. Obtain the latest {productname} version, for example, {productname} {producty}, on each system. + +.. Enter the following command on System A to obtain the latest {productname} version: ++ +[source,terminal] +---- +$ sudo podman pull registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +.. Enter the following command on System B to obtain the latest {productname} version: ++ +[source,terminal] +---- +$ sudo podman pull registry.redhat.io/quay/quay-rhel8:v{producty} +---- + +.. Enter the following command on System C to obtain the latest {productname} version: ++ +[source,terminal] +---- +$ sudo podman pull registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +. 
On System A of your highly available {productname} deployment, run the new image version, for example, {productname} {producty}: ++ +[source,terminal] +---- +# sudo podman run --restart=always -p 443:8443 -p 80:8080 \ + --sysctl net.core.somaxconn=4096 \ + --name=quay01 \ + -v /mnt/quay/config:/conf/stack:Z \ + -v /mnt/quay/storage:/datastorage:Z \ + -d registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +. Wait for the new {productname} container to become fully operational on System A. You can check the status of the container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +70b9f38c3fb4 registry.redhat.io/quay/quay-rhel8:v{producty} registry 2 seconds ago Up 2 seconds ago 0.0.0.0:82->8080/tcp, 0.0.0.0:445->8443/tcp quay01 +---- + +. Optional: Ensure that {productname} is fully operation by navigating to the {productname} UI. + +. After ensuring that {productname} on System A is fully operational, run the new image versions on System B and on System C. + +.. On System B of your highly available {productname} deployment, run the new image version, for example, {productname} {producty}: ++ +[source,terminal] +---- +# sudo podman run --restart=always -p 443:8443 -p 80:8080 \ + --sysctl net.core.somaxconn=4096 \ + --name=quay02 \ + -v /mnt/quay/config:/conf/stack:Z \ + -v /mnt/quay/storage:/datastorage:Z \ + -d registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +.. On System C of your highly available {productname} deployment, run the new image version, for example, {productname} {producty}: ++ +[source,terminal] +---- +# sudo podman run --restart=always -p 443:8443 -p 80:8080 \ + --sysctl net.core.somaxconn=4096 \ + --name=quay03 \ + -v /mnt/quay/config:/conf/stack:Z \ + -v /mnt/quay/storage:/datastorage:Z \ + -d registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +. 
You can check the status of the containers on System B and on System C by entering the following command: ++ +[source,terminal] +---- +$ sudo podman ps +---- \ No newline at end of file diff --git a/modules/upgrading-postgresql.adoc b/modules/upgrading-postgresql.adoc new file mode 100644 index 000000000..92c5c2303 --- /dev/null +++ b/modules/upgrading-postgresql.adoc @@ -0,0 +1,163 @@ +:_content-type: PROCEDURE +[id="upgrading-postgresql"] += Updating {productname} and the {productname} and Clair PostgreSQL databases on {ocp} + +[IMPORTANT] +==== +If your {productname} deployment is upgrading from one y-stream to the next, for example, from 3.8.10 -> 3.8.11, you must not switch the upgrade channel from `stable-3.8` to `stable-3.9`. Changing the upgrade channel in the middle of a y-stream upgrade will disallow {productname} from upgrading to 3.9. This is a known issue and will be fixed in a future version of {productname}. +==== + +When updating {productname} 3.8 -> 3.9, the Operator automatically upgrades the existing PostgreSQL databases for Clair and {productname} from version 10 to version 13. + +[IMPORTANT] +==== +* Users with a managed database are required to upgrade their PostgreSQL database from 10 -> 13. +* If your {productname} and Clair databases are managed by the Operator, the database upgrades for each component must succeed for the 3.9.0 upgrade to be successful. If either of the database upgrades fail, the entire {productname} version upgrade fails. This behavior is expected. +==== + +You can update {productname} and the {productname} and Clair PostgreSQL databases on {ocp} by using the *Web Console* UI, or by using the CLI. + +[id="updating-quay-clair-postgresql-db-console"] +== Updating {productname} and the {productname} and Clair PostgreSQL databases using the {ocp} web console + +Use the following procedure to update {productname} and the {productname} and Clair PostgreSQL databases using the {ocp} web console. 
+ +[IMPORTANT] +==== +* This upgrade is irreversible. It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. +* If your {productname} and Clair databases are managed by the Operator, the database upgrades for each component must succeed for the 3.9.0 upgrade to be successful. If either of the database upgrades fail, the entire {productname} version upgrade fails. This behavior is expected. +* By default, {productname} is configured to save old persistent volume claims (PVCs) from PostgreSQL 10. To disable this setting and remove old PVCs, you must set `POSTGRES_UPGRADE_DELETE_BACKUP` to `True` in your `quay-operator` `Subscription` object. +==== + +.Prerequisites + +* You have installed {productname} 3.6, 3.7, or 3.8 on {ocp}. +* 100 GB of free, additional storage. ++ +During the upgrade process, additional persistent volume claims (PVCs) are provisioned to store the migrated data. This helps prevent a destructive operation on user data. The upgrade process rolls out PVCs for 50 GB for both the {productname} database upgrade, and the Clair database upgrade. + +.Procedure + +. Optional. Back up your old PVCs from PostgreSQL 10 by setting `POSTGRES_UPGRADE_DELETE_BACKUP` to `false` your `quay-operator` `Subscription` object. For example: ++ +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.8 + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_DELETE_BACKUP <1> + value: "false" +---- +<1> When set to `true`, removes old PVCs after upgrading. + +. In the {ocp} Web Console, navigate to *Operators* -> *Installed Operators*. + +. Click on the {productname} Operator. + +. 
Navigate to the *Subscription* tab. + +. Under *Subscription details* click *Update channel*. + +. Select *stable-3.9* and save the changes. + +. Check the progress of the new installation under *Upgrade status*. Wait until the upgrade status changes to *1 installed* before proceeding. + +. In your {ocp} cluster, navigate to *Workloads* -> *Pods*. Existing pods should be terminated, or in the process of being terminated. + +. Wait for the following pods, which are responsible for upgrading the database and alembic migration of existing data, to spin up: `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade`. + +. After the `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade` pods are marked as *Completed*, the remaining pods for your {productname} deployment spin up. This takes approximately ten minutes. + +. Verify that the `quay-database` uses the `postgresql-13` image and `clair-postgres` pods now use the `postgresql-15` image. + +. After the `quay-app` pod is marked as *Running*, you can reach your {productname} registry. + +[id="updating-quay-clair-postgresql-db-cli"] +== Updating {productname} and the {productname} and Clair PostgreSQL databases using the CLI + +Use the following procedure to update {productname} and the {productname} and Clair PostgreSQL databases using the command-line interface (CLI). + +[IMPORTANT] +==== +* This upgrade is irreversible. It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. +* By default, {productname} is configured to save old persistent volume claims (PVCs) from PostgreSQL 10. To disable this setting and remove old PVCs, you must set `POSTGRES_UPGRADE_DELETE_BACKUP` to `True` in your `quay-operator` `Subscription` object. 
+==== + +.Prerequisites + +* You have installed {productname} 3.6, 3.7, or 3.8 on {ocp}. +* 100 GB of free, additional storage. ++ +During the upgrade process, additional persistent volume claims (PVCs) are provisioned to store the migrated data. This helps prevent a destructive operation on user data. The upgrade process rolls out PVCs for 50 GB for both the {productname} database upgrade, and the Clair database upgrade. + +.Procedure + +. Retrieve your `quay-operator` configuration file by entering the following `oc get` command: ++ +[source,terminal] +---- +$ oc get subscription quay-operator -n quay-enterprise -o yaml > quay-operator.yaml +---- + +. Retrieve the latest version of the {productname} Operator and its channel by entering the following command: ++ +[source,terminal] +---- +oc get packagemanifests quay-operator \ + -o jsonpath='{range .status.channels[*]}{@.currentCSV} {@.name}{"\n"}{end}' \ + | awk '{print "STARTING_CSV=" $1 " CHANNEL=" $2 }' \ + | sort -nr \ + | head -1 +---- ++ +.Example output ++ +[source,terminal] +---- +STARTING_CSV=quay-operator.v3.9.0 CHANNEL=stable-3.9 +---- + +. Using the output from the previous command, update your `Subscription` custom resource for the {productname} Operator and save it as `quay-operator.yaml`. For example: ++ +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.9 <1> + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_DELETE_BACKUP <2> + value: "false" +---- +<1> Specify the value you obtained in the previous step for the `spec.channel` parameter. +<2> Optional. Back up your old PVCs from PostgreSQL 10 by setting `POSTGRES_UPGRADE_DELETE_BACKUP` to `false` your `quay-operator` `Subscription` object. + +. 
Enter the following command to apply the configuration: ++ +[source,terminal] +---- +$ oc apply -f quay-operator.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +subscription.operators.coreos.com/quay-operator created +---- \ No newline at end of file diff --git a/modules/use-quay-export-logs-api.adoc b/modules/use-quay-export-logs-api.adoc new file mode 100644 index 000000000..167512e52 --- /dev/null +++ b/modules/use-quay-export-logs-api.adoc @@ -0,0 +1,67 @@ +:_content-type: PROCEDURE +[id="use-quay-export-logs-api"] += Exporting logs by using the API + +Detailed logs can be exported to a callback URL or to an email address. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#exportuserlogs[`POST /api/v1/user/exportlogs`] endpoint to export logs for the current user: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "your.email@example.com" + }' \ + "http:///api/v1/user/exportlogs" +---- ++ +.Example output ++ +[source,terminal] +---- +{"export_id": "6a0b9ea9-444c-4a19-9db8-113201c38cd4"} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#exportorglogs[`POST /api/v1/organization/{orgname}/exportlogs`] endpoint to export logs for an Organization: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "org.logs@example.com" + }' \ + "http:///api/v1/organization/{orgname}/exportlogs" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#exportrepologs[`POST /api/v1/repository/{repository}/exportlogs`] endpoint to export logs for a repository: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "2024-01-01", + "endtime": "2024-06-18", + "callback_url": "http://your-callback-url.example.com" + }' \ + "http:///api/v1/repository/{repository}/exportlogs" +---- \ No newline at end of file diff --git a/modules/use-quay-export-logs.adoc b/modules/use-quay-export-logs.adoc new file mode 100644 index 000000000..73f1ba40a --- /dev/null +++ b/modules/use-quay-export-logs.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="use-quay-export-logs"] += Exporting repository logs by using the UI + +ifeval::["{context}" == "quay-io"] +You can obtain a larger number of log files and save them outside of {quayio} by using the *Export Logs* feature. This feature has the following benefits and constraints: +endif::[] + +ifeval::["{context}" == "use-quay"] +You can obtain a larger number of log files and save them outside of the {productname} database by using the *Export Logs* feature. This feature has the following benefits and constraints: +endif::[] + +* You can choose a range of dates for the logs you want to gather from a repository. 
+ +* You can request that the logs be sent to you by an email attachment or directed to a callback URL. + +* To export logs, you must be an administrator of the repository or namespace. + +* 30 days' worth of logs are retained for all users. + +* Export logs only gathers log data that was previously produced. It does not stream logging data. + +ifeval::["{context}" == "use-quay"] +* Your {productname} instance must be configured for external storage for this feature. Local storage does not work for exporting logs. +endif::[] + +* When logs are gathered and made available to you, you should immediately copy that data if you want to save it. By default, the data expires after one hour. + +Use the following procedure to export logs. + +.Procedure + +. Select a repository for which you have administrator privileges. + +. Click the *Logs* tab. + +. Optional. If you want to specify a date range, enter the range in the *From* and *to* boxes. + +. Click the *Export Logs* button. An Export Usage Logs pop-up appears, as shown in the following image: ++ +image:export-usage-logs.png[Enter email or callback URL to receive exported logs] + +. Enter an email address or callback URL to receive the exported log. For the callback URL, you can use a URL to a specified domain, for example, . + +. Select *Confirm* to start the process for gathering the selected log entries. Depending on the amount of logging data being gathered, this can take anywhere from a few minutes to several hours to complete. + +. When the log export is completed, one of the following two events happens: ++ +* An email is received, alerting you to the availability of your requested exported log entries. + +* A successful status of your log export request from the webhook URL is returned. Additionally, a link to the exported data is made available for you to select to download the logs.
+ +ifeval::["{context}" == "use-quay"] +[NOTE] +==== +The URL points to a location in your {productname} external storage and is set to expire within one hour. Make sure that you copy the exported logs before the expiration time if you intend to keep your logs. +==== +endif::[] \ No newline at end of file diff --git a/modules/use-quay-pull-image.adoc b/modules/use-quay-pull-image.adoc new file mode 100644 index 000000000..9a79b30b4 --- /dev/null +++ b/modules/use-quay-pull-image.adoc @@ -0,0 +1,65 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="use-quay-pull-image"] += Pulling an image + +_Pulling_ an image refers to the process of downloading a container image from {quayio} or another container registry to your local system or a container orchestration platform like Kubernetes or {ocp}. + +When you pull an image from {quayio}, you are essentially fetching a copy of that image to use on your local machine or in your container orchestration environment. This is a fundamental step in the containerization process, as it allows you to access the software contained within the image and run it as containers on your infrastructure. + +[IMPORTANT] +==== +As a safety measure against DDoS attacks, {quayio} rate limits pulls. If you are executing too many pulls at the same time from a single client, you might receive a `429` response. +==== + +To pull an image from {quayio}, you typically use a container management tool like Podman or a container orchestration platform like {ocp}. + +Use the following procedure to pull an image from {quayio}. + +.Prerequisites + +* You have downloaded and installed the `podman` CLI. +* You have logged into {quayio}. + +.Procedure + +. 
Enter the following command to pull a sample image, for example, `busybox`, from {quayio}: ++ +[source,terminal] +---- +$ podman pull quay.io/quay/busybox +---- ++ +.Example output ++ +[source,terminal] +---- +Trying to pull quay.io/quay/busybox... +Getting image source signatures +Copying blob 4c892f00285e done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 +---- + +. You can check that you have pulled the container by running the following command: ++ +[source,terminal] +---- +$ podman images +---- ++ +.Example output ++ +[source,terminal] +---- +stevsmit@stevsmit quay_io (quayio-book) $ podman images +REPOSITORY TAG IMAGE ID CREATED SIZE +quay.io/quay/busybox latest e3121c769e39 3 years ago 1.45 MB +---- \ No newline at end of file diff --git a/modules/user-create.adoc b/modules/user-create.adoc index 8bd1dadcd..b15b2c7ab 100644 --- a/modules/user-create.adoc +++ b/modules/user-create.adoc @@ -1,18 +1,20 @@ -[[user-create]] -= Creating user accounts -To create a new user for your {productname} instance: -. Log in to {productname} as the superuser (quay by default). -. Select your account name from the upper right corner of the home page and choose Super User Admin Panel. -. Select the Users icon from the left column. -. Select the Create User button. -. Enter the new user’s Username and Email address, then select the Create User button. -. Back on the Users page, select the Options icon to the right of the new Username. A drop-down menu appears, as shown in the following figure: -+ -image:user-options.png[Select Options drop-down to change user passwords] +// module included in the following assemblies: -. Choose Change Password from the menu. -. Add the new password and verify it, then select the Change User Password button. 
+// * use_quay/master.adoc
+// * quay_io/master.adoc

-The new user can now use that username and password to log in via the web ui or through some container client.
+// Needs to be updated when v2 UI panel is available
+:_content-type: CONCEPT
+[id="user-create"]
+ifeval::["{context}" == "quay-io"]
+= {quayio} user accounts overview
+endif::[]
+ifeval::["{context}" == "use-quay"]
+= {productname} user accounts overview
+endif::[]
+
+A _user account_ represents an individual with authenticated access to the platform's features and functionalities. User accounts provide the capability to create and manage repositories, upload and retrieve container images, and control access permissions for these resources. This account is pivotal for organizing and overseeing container image management within {productname}.
+
+You can create and delete new users on the {productname} UI or by using the {productname} API.
\ No newline at end of file
diff --git a/modules/user-org-intro.adoc b/modules/user-org-intro.adoc
index ba36ee7f2..25bbb32cc 100644
--- a/modules/user-org-intro.adoc
+++ b/modules/user-org-intro.adoc
@@ -1,7 +1,44 @@
-[[user-org-intro]]
-= Users and organizations in {productname}
+// Module included in the following assemblies:

-Before you begin creating repositories to hold your container images
-in {productname}, you should consider how you want to organize those
-repositories. Every repository in a {productname} instance must be
-associated with either an Organization or a User.
+// * quay_io/master.adoc
+// * use_quay/master.adoc
+
+:_content-type: CONCEPT
+[id="user-org-intro_{context}"]
+= {productname} tenancy model
+
+Before creating repositories to contain your container images in
+ifeval::["{context}" == "quay-io"]
+{quayio},
+endif::[]
+ifeval::["{context}" == "use-quay"]
+{productname},
+endif::[]
+you should consider how these repositories will be structured. 
With
+ifeval::["{context}" == "quay-io"]
+{quayio},
+endif::[]
+ifeval::["{context}" == "use-quay"]
+{productname},
+endif::[]
+each repository requires a connection with either an _Organization_ or a _User_. This affiliation defines ownership and access control for the repositories.
+
+////
+[discrete]
+[id="tenancy-model"]
+== Tenancy model
+
+image:178_Quay_architecture_0821_tenancy_model.png[Tenancy model]
+
+* **Organizations** provide a way of sharing repositories under a common namespace that does not belong to a single user. Instead, these repositories belong to several users in a shared setting, such as a company.
+
+* **Teams** provide a way for an Organization to delegate permissions. Permissions can be set at the global level (for example, across all repositories), or on specific repositories. They can also be set for specific sets, or groups, of users.
+
+* **Users** can log in to a registry through the web UI or by using a client like Podman and using their respective login commands, for example, `$ podman login`. Each user automatically gets a user namespace, for example, `//`, or `quay.io/` if you are using {quayio}.
+
+ifeval::["{context}" == "use-quay"]
+* **Superusers** have enhanced access and privileges through the *Super User Admin Panel* in the user interface. Superuser API calls are also available, which are not visible or accessible to normal users.
+endif::[]
+
+* **Robot accounts** provide automated access to repositories for non-human users like pipeline tools. Robot accounts are similar to {ocp} *Service Accounts*. Permissions can be granted to a robot account in a repository by adding that account like you would another user or team. 
+//// \ No newline at end of file diff --git a/modules/user-permissions-repo.adoc b/modules/user-permissions-repo.adoc new file mode 100644 index 000000000..b5fa86ff3 --- /dev/null +++ b/modules/user-permissions-repo.adoc @@ -0,0 +1,88 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="repo-manage-user-permissions"] += Managing user permissions by using the {productname} API + +Use the following procedure to manage user permissions by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserpermissions[`GET /api/v1/repository/{repository}/permissions/user/{username}`] endpoint to obtain repository permissions for a user. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user/" +---- ++ +.Example output ++ +[source,terminal] +---- +$ {"role": "read", "name": "testuser", "is_robot": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "is_org_member": false} +---- + +. 
All user permissions can be returned with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepouserpermissions[`GET /api/v1/repository/{repository}/permissions/user/`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/user/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": {"quayadmin": {"role": "admin", "name": "quayadmin", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "is_org_member": true}, "test+example": {"role": "admin", "name": "test+example", "is_robot": true, "avatar": {"name": "test+example", "hash": "3b03050c26e900500437beee4f7f2a5855ca7e7c5eab4623a023ee613565a60e", "color": "#a1d99b", "kind": "robot"}, "is_org_member": true}}} +---- + +. Alternatively, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getusertransitivepermission[`GET /api/v1/repository/{repository}/permissions/user/{username}/transitive`] endpoint to return only the repository permission for the user: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user//transitive" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"role": "admin"}]} +---- + +. You can change the user's permissions, such as making the user an `admin` by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeuserpermissions[`PUT /api/v1/repository/{repository}/permissions/user/{username}`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": ""}' \ + "https://quay-server.example.com/api/v1/repository//permissions/user/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "admin", "name": "testuser", "is_robot": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "is_org_member": false} +---- + +. User permissions can be deleted by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserpermissions[`DELETE /api/v1/repository/{repository}/permissions/user/{username}`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/user/" +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/using-other-oci-artifacts-with-quay.adoc b/modules/using-other-oci-artifacts-with-quay.adoc new file mode 100644 index 000000000..8410dac6c --- /dev/null +++ b/modules/using-other-oci-artifacts-with-quay.adoc @@ -0,0 +1,60 @@ +// Document included in the following assemblies: + +// Using Red Hat Quay + +:_content-type: REFERENCE +[id="using-other-oci-artifacts-with-quay"] += Using other artifact types + +By default, other artifact types are enabled for use by +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +ifeval::["{context}" == "use-quay"] +Use the following procedure to add additional OCI media types. + +.Prerequisites + +* You have set `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file. + +.Procedure + +. In your `config.yaml` file, add the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field. 
For example: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +ALLOWED_OCI_ARTIFACT_TYPES: + : + - + - + + : + - + - +---- + +. Add support for your desired artifact type, for example, Singularity Image Format (SIF), by adding the following to your `config.yaml` file: ++ +[source,yaml] +---- +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.dev.cosign.simplesigning.v1+json + application/vnd.cncf.helm.config.v1+json: + - application/tar+gzip + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +---- ++ +[IMPORTANT] +==== +When adding artifact types that are not configured by default, {productname} administrators will also need to manually add support for Cosign and Helm if desired. +==== ++ +Now, users can tag SIF images for their {productname} registry. +endif::[] \ No newline at end of file diff --git a/modules/using-the-api.adoc b/modules/using-the-api.adoc new file mode 100644 index 000000000..144c612a6 --- /dev/null +++ b/modules/using-the-api.adoc @@ -0,0 +1,87 @@ +:_content-type: REFERENCE +[id="using-the-api"] += Using the {productname} API + +After you have created an application and generated an OAuth 2 access token with the desired settings, you can pass in the access token to `GET`, `PUT`, `POST`, or `DELETE` settings by using the API from the CLI. Generally, a {productname} API command looks similar to the following example: + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " \ <1> + https:///api/v1/// <2> +---- +<1> The OAuth 2 access token that was generated through the {productname} UI. +<2> The URL of your {productname} deployment and the desired API endpoint. + +All {productname} APIs are documented in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#red_hat_quay_application_programming_interface_api[Application Programming Interface (API)] chapter. 
Understanding how they are documented is crucial to successful invocation. Take, for example, the following entry for the `createAppToken` API endpoint: + +[source,text] +---- +*createAppToken* <1> +Create a new app specific token for user. <2> + +*POST /api/v1/user/apptoken* <3> + +**Authorizations: **oauth2_implicit (**user:admin**) <4> + + Request body schema (application/json) + +*Path parameters* <5> + +Name: **title** +Description: Friendly name to help identify the token. +Schema: string + +*Responses* <6> + +|HTTP Code|Description |Schema +|201 |Successful creation | +|400 |Bad Request |<<_apierror,ApiError>> +|401 |Session required |<<_apierror,ApiError>> +|403 |Unauthorized access |<<_apierror,ApiError>> +|404 |Not found |<<_apierror,ApiError>> +|=== +---- +<1> The name of the API endpoint. +<2> A brief description of the API endpoint. +<3> The API endpoint used for invocation. +<4> The authorizations required to use the API endpoint. +<5> The available paths to be used with the API endpoint. In this example, `title` is the only path to be used with the `POST /api/v1/user/apptoken` endpoint. +<6> The API responses for this endpoint. + +In order to use an API endpoint, you pass in your access token and then include the appropriate fields depending on your needs. The following procedure shows you how to use the `POST /api/v1/user/apptoken` endpoint. + +.Prerequisites + +* You have access to the {productname} API, which entails having already created an OAuth 2 access token. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. 
+ +.Procedure + +* Create a user application by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#appspecifictokens[`POST /api/v1/user/apptoken`] API call: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " <1> + -H "Content-Type: application/json" \ + -d '{ + "title": "MyAppToken" <2> + }' \ + "http://quay-server.example.com/api/v1/user/apptoken" <3> +---- +<1> The Oauth access token. +<2> The name of your application token. +<3> The URL of your {productname} deployment appended with the `/api/v1/user/apptoken` endpoint. ++ +.Example output ++ +[source,terminal] +---- +{"token": {"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null, "token_code": "K2YQB1YO0ABYV5OBUYOMF9MCUABN12Y608Q9RHFXBI8K7IE8TYCI4WEEXSVH1AXWKZCKGUVA57PSA8N48PWED9F27PXATFUVUD9QDNCE9GOT9Q8ACYPIN0HL"}} +---- + +.Verification + +* On the {productname} UI, click your username in the navigation pane -> *Account Settings*. The name of your application appears under the *Docker CLI and other Application Tokens* heading. For example: ++ +image::application-token.png[Application token] \ No newline at end of file diff --git a/modules/using-the-oauth-token.adoc b/modules/using-the-oauth-token.adoc index 5d4bfda1f..5aa9fa977 100644 --- a/modules/using-the-oauth-token.adoc +++ b/modules/using-the-oauth-token.adoc @@ -42,4 +42,4 @@ Example output: } ---- + -In this instance, the details for the `quayadmin` user are returned as it is the only user that has been created so far. \ No newline at end of file +In this instance, the details for the `quayadmin` user are returned as it is the only user that has been created so far. 
diff --git a/modules/using-v2-ui.adoc b/modules/using-v2-ui.adoc new file mode 100644 index 000000000..8c6f81cac --- /dev/null +++ b/modules/using-v2-ui.adoc @@ -0,0 +1,559 @@ +:_content-type: PROCEDURE +[id="using-v2-ui"] += Using the v2 UI + +ifeval::["{context}" == "quay-io"] +The {quayio} v2 UI is enabled by default, and can be toggled on or off at a user's discretion. +endif::[] +ifeval::["{context}" == "use-quay"] +Use the following procedures to configure, and use, the {productname} v2 UI. +endif::[] + +[id="reference-miscellaneous-v2-ui"] +== v2 user interface configuration +ifeval::["{context}" == "quay-io"] +On {quayio}, you can toggle between the current version of the user interface and the new version of the user interface. +endif::[] + +ifeval::["{context}" == "use-quay"] +With `FEATURE_UI_V2` enabled, you can toggle between the current version of the user interface and the new version of the user interface. +endif::[] + +[IMPORTANT] +==== +* This UI is currently in beta and subject to change. In its current state, users can only create, view, and delete organizations, repositories, and image tags. +* When using the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. +* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. The v2 UI uses the standard definition of megabyte (MB) to report image manifest sizes. +==== + +.Procedure +ifeval::["{context}" == "use-quay"] +. 
In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +FEATURE_TEAM_SYNCING: false +FEATURE_UI_V2: true +FEATURE_USER_CREATION: true +--- +---- +endif::[] + +. Log in to your +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +deployment. + +. In the navigation pane of your deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to new UI, and then click *Use Beta Environment*, for example: ++ +image:38-ui-toggle.png[{productname} v2 UI toggle] + +[id="creating-new-organization-v2-ui"] +=== Creating a new organization using the v2 UI + +.Prerequisites + +* You have toggled your deployment to use the v2 UI. + +Use the following procedure to create an organization using the v2 UI. + +.Procedure + +. Click *Organization* in the navigation pane. + +. Click *Create Organization*. + +. Enter an *Organization Name*, for example, `testorg`. + +. Click *Create*. + +Now, your example organization should populate under the *Organizations* page. + +[id="deleting-organization-v2"] +=== Deleting an organization using the v2 UI + +Use the following procedure to delete an organization using the v2 UI. + +.Procedure + +. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`. + +. Click the *More Actions* drop down menu. + +. Click *Delete*. ++ +[NOTE] +==== +On the *Delete* page, there is a *Search* input box. With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm said organization is marked for deletion. +==== + +. 
Confirm that you want to permanently delete the organization by typing *confirm* in the box.
+
+. Click *Delete*.
++
+After deletion, you are returned to the *Organizations* page.
++
+[NOTE]
+====
+You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*.
+====
+
+[id="creating-new-repository-v2"]
+=== Creating a new repository using the v2 UI
+
+Use the following procedure to create a repository using the v2 UI.
+
+.Procedure
+
+. Click *Repositories* on the navigation pane.
+
+. Click *Create Repository*.
+
+. Select a namespace, for example, *quayadmin*, and then enter a *Repository name*, for example, `testrepo`.
++
+[IMPORTANT]
+====
+Do not use the following words in your repository name:
+* `build`
+* `trigger`
+* `tag`
+
+When these words are used for repository names, users are unable to access the repository, and are unable to permanently delete the repository. Attempting to delete these repositories returns the following error: `Failed to delete repository , HTTP404 - Not Found.`
+====
+
+. Click *Create*.
++
+Now, your example repository should populate under the *Repositories* page.
+
+[id="deleting-repository-v2"]
+=== Deleting a repository using the v2 UI
+
+.Prerequisites
+
+* You have created a repository.
+
+.Procedure
+
+. On the *Repositories* page of the v2 UI, click the name of the image you want to delete, for example, `quay/admin/busybox`.
+
+. Click the *More Actions* drop-down menu.
+
+. Click *Delete*.
++
+[NOTE]
+====
+If desired, you could click *Make Public* or *Make Private*.
+====
+
+. Type *confirm* in the box, and then click *Delete*.
+
+. After deletion, you are returned to the *Repositories* page.
+
+[id="pushing-image-v2"]
+=== Pushing an image to the v2 UI
+
+Use the following procedure to push an image to the v2 UI.
+
+.Procedure
+
+. Pull a sample image from an external registry:
++
+[source,terminal]
+----
+$ podman pull busybox
+----
+
+. 
Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your registry: ++ +[source,terminal] +---- +$ podman push quay-server.example.com/quayadmin/busybox:test +---- + +. Navigate to the *Repositories* page on the v2 UI and ensure that your image has been properly pushed. + +. You can check the security details by selecting your image tag, and then navigating to the *Security Report* page. + +[id="deleting-image-v2"] +=== Deleting an image using the v2 UI + +Use the following procedure to delete an image using the v2 UI. + +.Prerequisites + +* You have pushed an image to your registry. + +.Procedure + +. On the *Repositories* page of the v2 UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. + +[id="creating-team-v2-ui"] +=== Creating a new team using the {productname} v2 UI + +Use the following procedure to create a new team using the {productname} v2 UI. + +.Prerequisites + +* You have created an organization with a repository. + +.Procedure + +. On the {productname} v2 UI, click the name of an organization. + +. On your organization's page, click *Teams and membership*. + +. Click the *Create new team* box. + +. In the *Create team* popup window, provide a name for your new team. + +. Optional. Provide a description for your new team. + +. Click *Proceed*. A new popup window appears. + +. Optional. Add this team to a repository, and set the permissions to one of *Read*, *Write*, *Admin*, or *None*. + +. Optional. Add a team member or robot account. To add a team member, enter the name of their {productname} account. + +. 
Review and finish the information, then click *Review and Finish*. The new team appears under the *Teams and membership* page. From here, you can click the kebab menu, and select one of the following options:
++
+* **Manage Team Members**. On this page, you can view all members, team members, robot accounts, or users who have been invited. You can also add a new team member by clicking *Add new member*.
+
+* **Set repository permissions**. On this page, you can set the repository permissions to one of *Read*, *Write*, *Admin*, or *None*.
+
+* **Delete**. This popup window allows you to delete the team by clicking *Delete*.
+
+. Optional. You can click one of the following options to reveal more information about teams, members, and collaborators:
+
+* **Team View**. This menu shows all team names, the number of members, the number of repositories, and the role for each team.
+
+* **Members View**. This menu shows all usernames of team members, the teams that they are part of, the repository permissions of the user.
+
+* **Collaborators View**. This menu shows repository collaborators. Collaborators are users that do not belong to any team in the organization, but who have direct permissions on one or more repositories belonging to the organization.
+
+[id="creating-robot-account-v2-ui"]
+=== Creating a robot account using the v2 UI
+
+Use the following procedure to create a robot account using the v2 UI.
+
+.Procedure
+
+. On the v2 UI, click *Organizations*.
+
+. Click the name of the organization that you will create the robot account for, for example, `test-org`.
+
+. Click the *Robot accounts* tab -> *Create robot account*.
+
+. In the *Provide a name for your robot account* box, enter a name, for example, `robot1`.
+
+. Optional. The following options are available if desired:
+
+.. Add the robot to a team.
+
+.. Add the robot to a repository.
+
+.. Adjust the robot's permissions.
+
+. 
On the *Review and finish* page, review the information you have provided, then click *Review and finish*. The following alert appears: *Successfully created robot account with robot name: + *. ++ +Alternatively, if you tried to create a robot account with the same name as another robot account, you might receive the following error message: *Error creating robot account*. + +. Optional. You can click *Expand* or *Collapse* to reveal descriptive information about the robot account. + +. Optional. You can change permissions of the robot account by clicking the kebab menu -> *Set repository permissions*. The following message appears: *Successfully updated repository permission*. + +. Optional. To delete your robot account, check the box of the robot account and click the trash can icon. A popup box appears. Type *confirm* in the text box, then, click *Delete*. Alternatively, you can click the kebab menu -> *Delete*. The following message appears: *Successfully deleted robot account*. + +[id="managing-robot-account-permissions-v2-ui"] +==== Bulk managing robot account repository access using the {productname} v2 UI + +Use the following procedure to manage, in bulk, robot account repository access using the {productname} v2 UI. + +.Prerequisites + +* You have created a robot account. +* You have created multiple repositories under a single organization. + +.Procedure + +. On the {productname} v2 UI landing page, click *Organizations* in the navigation pane. + +. On the *Organizations* page, select the name of the organization that has multiple repositories. The number of repositories under a single organization can be found under the *Repo Count* column. + +. On your organization's page, click *Robot accounts*. + +. For the robot account that will be added to multiple repositories, click the kebab icon -> *Set repository permissions*. + +. On the *Set repository permissions* page, check the boxes of the repositories that the robot account will be added to. 
For example: ++ +image:set-repository-permissions-robot-account.png[Set repository permissions] + +. Set the permissions for the robot account, for example, *None*, *Read*, *Write*, *Admin*. + +. Click *save*. An alert that says *Success alert: Successfully updated repository permission* appears on the *Set repository permissions* page, confirming the changes. + +. Return to the *Organizations* -> *Robot accounts* page. Now, the *Repositories* column of your robot account shows the number of repositories that the robot account has been added to. + +[id="default-permissions-v2-ui"] +=== Creating default permissions using the {productname} v2 UI + +Default permissions defines permissions that should be granted automatically to a repository when it is created, in addition to the default of the repository's creator. Permissions are assigned based on the user who created the repository. + +Use the following procedure to create default permissions using the {productname} v2 UI. + +.Procedure + +. Click the name of an organization. + +. Click *Default permissions*. + +. Click *create default permissions*. A toggle drawer appears. + +. Select either *Anyone* or *Specific user* to create a default permission when a repository is created. + +.. If selecting *Anyone*, the following information must be provided: ++ +* **Applied to**. Search, invite, or add a user/robot/team. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +.. If selecting *Specific user*, the following information must be provided: ++ +* **Repository creator**. Provide either a user or robot account. +* **Applied to**. Provide a username, robot account, or team name. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +. Click *Create default permission*. A confirmation box appears, returning the following alert: *Successfully created default permission for creator*. 
+ +[id="organization-settings-v2-ui"] +=== Organization settings for the v2 UI + +Use the following procedure to alter your organization settings using the v2 UI. + +.Procedure + +. On the v2 UI, click *Organizations*. + +. Click the name of the organization that you will create the robot account for, for example, `test-org`. + +. Click the *Settings* tab. + +. Optional. Enter the email address associated with the organization. + +. Optional. Set the allotted time for the *Time Machine* feature to one of the following: ++ +* *1 week* +* *1 month* +* *1 year* +* *Never* + +. Click *Save*. + +[id="tag-overview-v2-ui"] +=== Viewing image tag information using the v2 UI + +Use the following procedure to view image tag information by using the v2 UI. + +.Procedure + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository, for example, `quayadmin/busybox`. + +. Click the name of the tag, for example, `test`. You are taken to the *Details* page of the tag. The page reveals the following information: ++ +* Name +* Repository +* Digest +* Vulnerabilities +* Creation +* Modified +* Size +* Labels +* How to fetch the image tag + +. Optional. Click *Security Report* to view the tag's vulnerabilities. You can expand an advisory column to open up CVE data. + +. Optional. Click *Packages* to view the tag's packages. + +. Click the name of the repository, for example, `busybox`, to return to the *Tags* page. + +. Optional. Hover over the *Pull* icon to reveal the ways to fetch the tag. + +. Check the box of the tag, or multiple tags, click the *Actions* drop down menu, and then *Delete* to delete the tag. Confirm deletion by clicking *Delete* in the popup box. + +[id="settings-overview-v2-ui"] +=== Adjusting repository settings using the v2 UI + +Use the following procedure to adjust various settings for a repository using the v2 UI. + +.Procedure + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository, for example, `quayadmin/busybox`. + +. 
Click the *Settings* tab. + +. Optional. Click *User and robot permissions*. You can adjust the settings for a user or robot account by clicking the dropdown menu option under *Permissions*. You can change the settings to *Read*, *Write*, or *Admin*. + +. Optional. Click *Events and notifications*. You can create an event and notification by clicking *Create Notification*. The following event options are available: ++ +* Push to Repository +* Package Vulnerability Found +* Image build failed +* Image build queued +* Image build started +* Image build success +* Image build cancelled ++ +Then, issue a notification. The following options are available: ++ +* Email Notification +* Flowdock Team Notification +* HipChat Room Notification +* Slack Notification +* Webhook POST ++ +After selecting an event option and the method of notification, include a *Room ID #*, a *Room Notification Token*, then, click *Submit*. + +. Optional. Click *Repository visibility*. You can make the repository private, or public, by clicking *Make Public*. + +. Optional. Click *Delete repository*. You can delete the repository by clicking *Delete Repository*. + +[id="viewing-tag-history-v2-ui"] +== Viewing {productname} tag history + +Use the following procedure to view tag history on the {productname} v2 UI. + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click *Tag History*. On this page, you can perform the following actions: ++ +* Search by tag name +* Select a date range +* View tag changes +* View tag modification dates and the time at which they were changed + +[id="adding-managing-labels"] +== Adding and managing labels on the {productname} v2 UI + +{productname} administrators can add and manage labels for tags by using the following procedure. + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. 
Click the name of a repository that has image tags. + +. Click the menu kebab for an image and select *Edit labels*. + +. In the *Edit labels* window, click *Add new label*. + +. Enter a label for the image tag using the `key=value` format, for example, `com.example.release-date=2023-11-14`. ++ +[NOTE] +==== +The following error is returned when failing to use the `key=value` format: `Invalid label format, must be key value separated by =`. +==== + +. Click the whitespace of the box to add the label. + +. Optional. Add a second label. + +. Click *Save labels* to save the label to the image tag. The following notification is returned: `Created labels successfully`. + +. Optional. Click the same image tag's menu kebab -> *Edit labels* -> *X* on the label to remove it; alternatively, you can edit the text. Click *Save labels*. The label is now removed or edited. + +[id="setting-tag-expirations-v2-ui"] +== Setting tag expirations on the {productname} v2 UI + +{productname} administrators can set expiration dates for certain tags in a repository. This helps automate the cleanup of older or unused tags, helping to reduce storage space. + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab for an image and select *Change expiration*. + +. Optional. Alternatively, you can bulk add expiration dates by clicking the box of multiple tags, and then select *Actions* -> *Set expiration*. + +. In the *Change Tags Expiration* window, set an expiration date, specifying the day of the week, month, day of the month, and year. For example, `Wednesday, November 15, 2023`. Alternatively, you can click the calendar button and manually select the date. + +. Set the time, for example, `2:30 PM`. + +. Click *Change Expiration* to confirm the date and time. The following notification is returned: `Successfully set expiration for tag test to Nov 15, 2023, 2:26 PM`. 
+ +. On the {productname} v2 UI *Tags* page, you can see when the tag is set to expire. For example: ++ +image:tag-expiration-v2-ui.png[{productname} v2 UI tag expiration] + +[id="selecting-dark-mode-ui"] +== Selecting color theme preference on the {productname} v2 UI + +Users can switch between light and dark modes when using the v2 UI. This feature also includes an automatic mode selection, which chooses between light or dark modes depending on the user's browser preference. + +Use the following procedure to switch between automatic, light, and dark modes. + +.Procedure + +. Log in to your {productname} repository. + +. In the navigation pane, click your username, for example, *quayadmin*. + +. Under *Appearance*, select between *Light theme*, *Dark theme*, and *Device-based theme*. Device based theme sets the mode depending on your browser's color preference. + +[id="viewing-usage-logs-v2-ui"] +== Viewing usage logs on the {productname} v2 UI + +{productname} logs can provide valuable information about the way that your {productname} registry is being used. Logs can be viewed by Organization, repository, or namespace on the v2 UI by using the following procedure. + +.Procedure + +. Log in to your {productname} registry. + +. Navigate to an Organization, repository, or namespace for which you are an administrator of. + +. Click *Logs*. ++ +image:logsv2-ui.png[Logs page] + +. Optional. Set the date range for viewing log entries by adding dates to the *From* and *To* boxes. + +. Optional. Export the logs by clicking *Export*. You must enter an email address or a valid callback URL that starts with `http://` or `https://`. This process can take an hour depending on how many logs there are. + +[id="enabling-legacy-ui"] +== Enabling the legacy UI + +. In the navigation pane, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to *Current UI*. 
++ +image:38-ui-toggle.png[{productname} v2 UI toggle] \ No newline at end of file diff --git a/modules/viewing-additional-info-about-team-ui.adoc b/modules/viewing-additional-info-about-team-ui.adoc new file mode 100644 index 000000000..def39a34e --- /dev/null +++ b/modules/viewing-additional-info-about-team-ui.adoc @@ -0,0 +1,20 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="viewing-additional-info-about-team-ui"] +== Viewing additional information about a team + +Use the following procedure to view general information about the team. + +.Procedure + +* On the *Teams and membership* page of your organization, you can click one of the following options to reveal more information about teams, members, and collaborators: + +** **Team View**. This menu shows all team names, the number of members, the number of repositories, and the role for each team. + +** **Members View**. This menu shows all usernames of team members, the teams that they are part of, and the repository permissions of the user. + +** **Collaborators View**. This menu shows repository collaborators. Collaborators are users that do not belong to any team in the organization, but who have direct permissions on one or more repositories belonging to the organization. \ No newline at end of file diff --git a/modules/viewing-and-modifying-tags.adoc b/modules/viewing-and-modifying-tags.adoc new file mode 100644 index 000000000..b57edb7e6 --- /dev/null +++ b/modules/viewing-and-modifying-tags.adoc @@ -0,0 +1,33 @@ +:_content-type: PROCEDURE +[id="viewing-and-modifying-tags"] += Viewing image tag information by using the UI + +Use the following procedure to view image tag information using the v2 UI. + +.Prerequisites + +* You have pushed an image tag to a repository. + +.Procedure + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository. + +. Click the name of a tag. 
You are taken to the *Details* page of that tag. The page reveals the following information: ++ +* Name +* Repository +* Digest +* Vulnerabilities +* Creation +* Modified +* Size +* Labels +* How to fetch the image tag + +. Click *Security Report* to view the tag's vulnerabilities. You can expand an advisory column to open up CVE data. + +. Click *Packages* to view the tag's packages. + +. Click the name of the repository to return to the *Tags* page. \ No newline at end of file diff --git a/modules/viewing-model-card-information.adoc b/modules/viewing-model-card-information.adoc new file mode 100644 index 000000000..82bae429a --- /dev/null +++ b/modules/viewing-model-card-information.adoc @@ -0,0 +1,38 @@ +:_content-type: PROCEDURE +[id="viewing-model-card-information"] += Viewing model card information by using the UI + +Model card information can be viewed on the v2 UI. Model cards are essentially markdown (`.md`) files with additional metadata that provide information about a machine learning application. To view model card information, a manifest must have an annotation that is defined in your `config.yaml` file (for example, `application/x-mlmodel`) and include a model card stored as a layer in the manifest. When these conditions are met, a *Model Card* tab appears on the *Details* page of a tag. + +* You have pushed an artifact of that annotation type, and it includes a model card (`.md`) file. + +.Procedure + +. Update your `config.yaml` file to include the following information: ++ +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true <1> +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel <2> +UI_MODELCARD_ANNOTATION: <3> + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: <4> + org.opencontainers.image.title: README.md +---- +<1> Enables the Model Card image tab in the UI. +<2> Defines the model card artifact type. In this example, the artifact type is `application/x-mlmodel`. +<3> Optional. 
If an image does not have an `artifactType` defined, this field is checked at the manifest level. If a matching annotation is found, the system then searches for a layer with an annotation matching `UI_MODELCARD_LAYER_ANNOTATION`. +<4> Optional. If an image has an `artifactType` defined and multiple layers, this field is used to locate the specific layer containing the model card. + +. Push an artifact of that annotation type, and one that includes a model card (`.md`) file, to your repository. + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository. + +. Click the name of a tag. You are taken to the *Details* page of that tag. + +. Click *ModelCard* to view information about the image. For example: ++ +image::modelcard.png[Modelcard information] \ No newline at end of file diff --git a/modules/viewing-tag-history-v2-api.adoc b/modules/viewing-tag-history-v2-api.adoc new file mode 100644 index 000000000..df2a86088 --- /dev/null +++ b/modules/viewing-tag-history-v2-api.adoc @@ -0,0 +1,51 @@ +:_content-type: PROCEDURE +[id="viewing-tag-history-v2-api"] += Viewing {productname} tag history by using the API + +{productname} offers a comprehensive history of images and their respective image tags. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to view tag history by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command and passing in one of the following queries: ++ +* *onlyActiveTags=*: Filters to only include active tags. + +* *page=*: Specifies the page number of results to retrieve. 
+ +* *limit=*: Limits the number of results per page. + +* *specificTag=*: Filters the tags to include only the tag with the specified name. ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https:///api/v1/repository///tag/?onlyActiveTags=true&page=1&limit=10" +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test-two", "reversion": false, "start_ts": 1717680780, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:33:00 -0000"}, {"name": "tag-test", "reversion": false, "start_ts": 1717680378, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:26:18 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}], "page": 1, "has_additional": false} +---- + +. By using the `specificTag=` query, you can filter results for a specific tag. 
For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "/api/v1/repository/quayadmin/busybox/tag/?onlyActiveTags=true&page=1&limit=20&specificTag=test-two" +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test-two", "reversion": true, "start_ts": 1718737153, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 18 Jun 2024 18:59:13 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/viewing-tag-history-v2-ui.adoc b/modules/viewing-tag-history-v2-ui.adoc new file mode 100644 index 000000000..d6d24302f --- /dev/null +++ b/modules/viewing-tag-history-v2-ui.adoc @@ -0,0 +1,24 @@ +:_content-type: PROCEDURE +[id="viewing-tag-history-v2-ui"] += Viewing {productname} tag history by using the UI + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive history of images and their respective image tags. + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click *Tag History*. 
On this page, you can perform the following actions: ++ +* Search by tag name +* Select a date range +* View tag changes +* View tag modification dates and the time at which they were changed \ No newline at end of file diff --git a/modules/viewing-tags-api.adoc b/modules/viewing-tags-api.adoc new file mode 100644 index 000000000..10163a0b6 --- /dev/null +++ b/modules/viewing-tags-api.adoc @@ -0,0 +1,47 @@ +:_content-type: CONCEPT +[id="viewing-and-modifying-tags-api"] += Viewing image tag information by using the API + +Use the following procedure to view image tag information by using the API. + +.Prerequisites + +* You have pushed an image tag to a {productname} repository. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. To obtain tag information, you must use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] API endpoint and pass in the `includeTags` parameter. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//?includeTags=true +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "quayadmin", "name": "busybox", "kind": "image", "description": null, "is_public": false, "is_organization": false, "is_starred": false, "status_token": "d8f5e074-690a-46d7-83c8-8d4e3d3d0715", "trust_enabled": false, "tag_expiration_s": 1209600, "is_free_account": true, "state": "NORMAL", "tags": {"example": {"name": "example", "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000", "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d"}, "test": {"name": "test", "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d"}}, "can_write": true, "can_admin": true} +---- + +. Alternatively, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test-two", "reversion": true, "start_ts": 1718737153, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 18 Jun 2024 18:59:13 -0000"}, {"name": "test-two", "reversion": false, "start_ts": 1718737029, "end_ts": 1718737153, "manifest_digest": "sha256:0cd3dd6236e246b349e63f76ce5f150e7cd5dbf2f2f1f88dbd734430418dbaea", "is_manifest_list": false, "size": 2275317, "last_modified": "Tue, 18 Jun 2024 18:57:09 -0000", "expiration": "Tue, 18 Jun 2024 18:59:13 -0000"}, {"name": "test-two", "reversion": false, "start_ts": 1718737018, "end_ts": 1718737029, "manifest_digest": "sha256:0cd3dd6236e246b349e63f76ce5f150e7cd5dbf2f2f1f88dbd734430418dbaea", "is_manifest_list": false, "size": 2275317, "last_modified": "Tue, 18 Jun 2024 18:56:58 -0000", "expiration": "Tue, 18 Jun 2024 18:57:09 -0000"}, {"name": "sample_tag", "reversion": false, "start_ts": 1718736147, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 18 Jun 2024 18:42:27 -0000"}, {"name": "test-two", "reversion": false, "start_ts": 1717680780, "end_ts": 1718737018, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:33:00 -0000", "expiration": "Tue, 18 Jun 2024 18:56:58 -0000"}, {"name": "tag-test", "reversion": false, "start_ts": 1717680378, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:26:18 -0000"}, {"name": "example", 
"reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/viewing-usage-logs-api.adoc b/modules/viewing-usage-logs-api.adoc new file mode 100644 index 000000000..7700a9149 --- /dev/null +++ b/modules/viewing-usage-logs-api.adoc @@ -0,0 +1,115 @@ +:_content-type: CONCEPT +[id="viewing-usage-logs-v2-api"] += Viewing usage logs by using the API + +Logs can be viewed by Organization or repository by using the API. They can also be aggregated (grouped), or listed in more detail. Logs can also be viewed by user, a specific date range, or by page. + +[id="viewing-aggregated-logs-api"] +== Viewing aggregated logs + +Aggregated logs can be viewed by Organization, repository, a specific user, or the current user. You can also pass in optional commands like `performer`, `starttime/endtime`, and `next_page` to filter results. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getaggregateuserlogs[`GET /api/v1/user/aggregatelogs`] API endpoint to return the aggregated (or grouped) logs for the current user: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https:///api/v1/user/aggregatelogs" +---- ++ +.Example output ++ +[source,terminal] +---- +{"aggregated": [{"kind": "create_tag", "count": 1, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}, {"kind": "manifest_label_add", "count": 1, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}, {"kind": "push_repo", "count": 2, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}, {"kind": "revert_tag", "count": 1, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}]} +---- ++ +You can also pass in the `performer` and `starttime/endtime` queries to obtain aggregated logs for a specific user between a specific time period. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/user/aggregatelogs?performer=&starttime=&endtime=" +---- + + +. Aggregated logs can also be viewed by Organization by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getaggregateorglogs[`GET /api/v1/organization/{orgname}/aggregatelogs`]. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/organization/{orgname}/aggregatelogs" +---- + +. Aggregated logs can also be viewed by repository by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getaggregaterepologs[`GET /api/v1/repository/{repository}/aggregatelogs`] command. 
The following example includes the `starttime/endtime` fields: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/repository///aggregatelogs?starttime=2024-01-01&endtime=2024-06-18" +---- + +[id="viewing-logs-api"] +== Viewing detailed logs + +Detailed logs can be viewed by Organization, repository, a specific user, or the current user. You can also pass in optional fields like `performer`, `starttime/endtime`, and `next_page` to filter results. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserlogs[`GET /api/v1/user/logs`] API endpoint to return a list of log entries for a user. For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "/api/v1/user/logs" +---- ++ +You can also pass in the `performer` and `starttime/endtime` queries to obtain logs for a specific user between a specific time period. 
For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "http://quay-server.example.com/api/v1/user/logs?performer=quayuser&starttime=01/01/2024&endtime=06/18/2024" +---- ++ +.Example output ++ +[source,terminal] +---- +--- +{"start_time": "Mon, 01 Jan 2024 00:00:00 -0000", "end_time": "Wed, 19 Jun 2024 00:00:00 -0000", "logs": [{"kind": "revert_tag", "metadata": {"username": "quayuser", "repo": "busybox", "tag": "test-two", "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d"}, "ip": "192.168.1.131", "datetime": "Tue, 18 Jun 2024 18:59:13 -0000", "performer": {"kind": "user", "name": "quayuser", "is_robot": false, "avatar": {"name": "quayuser", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}}}, {"kind": "push_repo", "metadata": {"repo": "busybox", "namespace": "quayuser", "user-agent": "containers/5.30.1 (github.com/containers/image)", "tag": "test-two", "username": "quayuser", } +--- +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorglogs[`GET /api/v1/organization/{orgname}/logs`] endpoint to return logs for a specified organization: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/organization/{orgname}/logs" +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepologs[`GET /api/v1/repository/{repository}/logs`] endpoint to return logs for a specified repository: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/repository/{repository}/logs" +---- \ No newline at end of file diff --git a/modules/viewing-usage-logs-v2-ui.adoc b/modules/viewing-usage-logs-v2-ui.adoc new file mode 100644 index 000000000..1350fedc8 --- /dev/null +++ b/modules/viewing-usage-logs-v2-ui.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="viewing-usage-logs-v2-ui"] += Viewing usage logs + +Logs can provide valuable information about the way that your registry is being used. Logs can be viewed by Organization, repository, or namespace on the v2 UI by using the following procedure. + +.Procedure + +. Log in to your {productname} registry. + +. Navigate to an Organization, repository, or namespace for which you are an administrator of. + +. Click *Logs*. ++ +image:logsv2-ui.png[Logs page] + +. Optional. Set the date range for viewing log entries by adding dates to the *From* and *To* boxes. + +. Optional. Export the logs by clicking *Export*. You must enter an email address or a valid callback URL that starts with `http://` or `https://`. This process can take an hour depending on how many logs there are. 
\ No newline at end of file diff --git a/quay_io/docinfo.xml b/quay_io/docinfo.xml new file mode 100644 index 000000000..a6b55b15a --- /dev/null +++ b/quay_io/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +About Quay IO + + About Quay IO + + + Red Hat OpenShift Documentation Team + + diff --git a/quay_io/master.adoc b/quay_io/master.adoc new file mode 100644 index 000000000..3e709b54f --- /dev/null +++ b/quay_io/master.adoc @@ -0,0 +1,113 @@ +:_content-type: ASSEMBLY + +include::modules/attributes.adoc[] + +[id="quay-io"] += About Quay IO + +:context: quay-io + +This comprehensive guide provides users with the knowledge and tools needed to make the most of our robust and feature-rich container registry service, {quayio}. + +include::modules/quayio-overview.adoc[leveloffset=+1] +include::modules/quayio-support.adoc[leveloffset=+1] +//ui +include::modules/quayio-ui-overview.adoc[leveloffset=+1] +include::modules/quayio-main-page.adoc[leveloffset=+2] + +include::modules/user-org-intro.adoc[leveloffset=+1] +include::modules/tenancy-model.adoc[leveloffset=+2] +include::modules/logging-into-quayio.adoc[leveloffset=+2] +//organization +include::modules/organizations-overview.adoc[leveloffset=+1] +include::modules/org-create.adoc[leveloffset=+2] +include::modules/organization-settings-v2-ui.adoc[leveloffset=+2] +//repo +include::modules/proc_use-quay-create-repo.adoc[leveloffset=+1] +include::modules/creating-an-image-repository-via-the-ui.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-docker.adoc[leveloffset=+2] +include::modules/deleting-an-image-repository-via-ui.adoc[leveloffset=+2] +include::modules/proc_configure-user-settings.adoc[leveloffset=+2] +//robot accounts +include::modules/robot-account-overview.adoc[leveloffset=+1] +include::modules/creating-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/managing-robot-account-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/disabling-robot-account.adoc[leveloffset=+2] 
+include::modules/deleting-robot-account-v2-ui.adoc[leveloffset=+2] +// federation +include::modules/keyless-authentication-robot-accounts.adoc[leveloffset=+2] + +//access management repositories +include::modules/proc_use-quay-manage-repo.adoc[leveloffset=+1] +include::modules/teams-overview.adoc[leveloffset=+2] +include::modules/creating-a-team-ui.adoc[leveloffset=+3] + +include::modules/managing-team-ui.adoc[leveloffset=+3] +include::modules/add-users-to-team.adoc[leveloffset=+4] +include::modules/set-team-role.adoc[leveloffset=+4] +include::modules/managing-team-members-repo-permissions-ui.adoc[leveloffset=+4] +include::modules/viewing-additional-info-about-team-ui.adoc[leveloffset=+4] + +include::modules/managing-a-team-api.adoc[leveloffset=+3] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+4] + +include::modules/default-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/allow-access-user-repo.adoc[leveloffset=+2] + +//tags +include::modules/image-tags-overview.adoc[leveloffset=+1] +include::modules/viewing-and-modifying-tags.adoc[leveloffset=+2] +include::modules/adding-a-new-tag-to-image.adoc[leveloffset=+2] +include::modules/adding-managing-labels.adoc[leveloffset=+2] +include::modules/setting-tag-expirations-v2-ui.adoc[leveloffset=+2] +include::modules/fetching-images-and-tags.adoc[leveloffset=+2] +include::modules/viewing-tag-history-v2-ui.adoc[leveloffset=+2] +include::modules/deleting-a-tag.adoc[leveloffset=+2] +include::modules/reverting-tag-changes.adoc[leveloffset=+2] + +//view and export logs +include::modules/proc_use-quay-view-export-logs.adoc[leveloffset=+1] +include::modules/viewing-usage-logs-v2-ui.adoc[leveloffset=+2] +include::modules/use-quay-export-logs.adoc[leveloffset=+2] + +//clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/about-clair.adoc[leveloffset=+2] +include::modules/security-scanning-ui.adoc[leveloffset=+2] 
+include::modules/clair-severity-mapping.adoc[leveloffset=+2] + +//docker files +//include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] +include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+1] +include::modules/understanding-tag-naming-build-triggers.adoc[leveloffset=+2] +include::modules/skipping-source-control-triggered-build.adoc[leveloffset=+2] +include::modules/starting-a-build.adoc[leveloffset=+2] + + +include::modules/build-trigger-overview.adoc[leveloffset=+2] +include::modules/red-hat-quay-builders-ui.adoc[leveloffset=+3] +include::modules/manually-triggering-a-build-trigger.adoc[leveloffset=+3] + +//Custom Git Triggers +include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+2] +//Notifications +include::modules/proc_use-quay-notifications.adoc[leveloffset=+1] +include::modules/notification-actions.adoc[leveloffset=+2] +include::modules/creating-notifications.adoc[leveloffset=+2] +include::modules/creating-image-expiration-notification.adoc[leveloffset=+3] +include::modules/creating-notifications-api.adoc[leveloffset=+2] +include::modules/repository-events.adoc[leveloffset=+2] +//helm +include::modules/oci-intro.adoc[leveloffset=+1] +include::modules/helm-oci-prereqs.adoc[leveloffset=+2] +include::modules/helm-oci-quay.adoc[leveloffset=+2] + +//cosign +include::modules/cosign-oci-intro.adoc[leveloffset=+2] +include::modules/cosign-oci-with-quay.adoc[leveloffset=+2] + +//other oci media types +//include::modules/using-other-oci-artifacts-with-quay.adoc[leveloffset=+2] + +//v2 UI +//include::modules/using-v2-ui.adoc[leveloffset=+1] \ No newline at end of file diff --git a/quay_io/modules b/quay_io/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/quay_io/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/red_hat_quay_operator_features/docinfo.xml b/red_hat_quay_operator_features/docinfo.xml new file mode 100644 index 000000000..096b57724 --- /dev/null +++ 
b/red_hat_quay_operator_features/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Advanced {productname} Operator features + + Advanced {productname} Operator features + + + Red Hat OpenShift Documentation Team + + diff --git a/red_hat_quay_operator_features/master.adoc b/red_hat_quay_operator_features/master.adoc new file mode 100644 index 000000000..19b3f86e9 --- /dev/null +++ b/red_hat_quay_operator_features/master.adoc @@ -0,0 +1,135 @@ +include::modules/attributes.adoc[] + +[id="quay-operator-advanced-features"] += {productname} Operator features +:context: operator-features + +// fips +include::modules/fips-overview.adoc[leveloffset=+1] + +//monitoring +include::modules/operator-console-monitoring-alerting.adoc[leveloffset=+1] + +//// +include::modules/configuring-port-mapping.adoc[leveloffset=+3] +include::modules/proc_deploy_quay_poc_db.adoc[leveloffset=+3] +include::modules/proc_deploy_quay_poc_redis.adoc[leveloffset=+3] +include::modules/operator-config-ui-access.adoc[leveloffset=+2] +include::modules/operator-config-ui-change.adoc[leveloffset=+2] +include::modules/operator-config-ui-monitoring.adoc[leveloffset=+2] +include::modules/operator-config-ui-updated.adoc[leveloffset=+2] +include::modules/config-ui-custom-ssl-certs.adoc[leveloffset=+2] + +include::modules/operator-external-access.adoc[leveloffset=+2] + +//move to using Operator +include::modules/operator-quayregistry-api.adoc[leveloffset=+2] +//// + +//clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] +include::modules/clair-openshift.adoc[leveloffset=+2] +include::modules/clair-testing.adoc[leveloffset=+2] +include::modules/clair-advanced-configuration-overview.adoc[leveloffset=+2] +include::modules/clair-unmanaged.adoc[leveloffset=+3] +include::modules/unmanaging-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+4] 
+include::modules/custom-clair-configuration-managed-database.adoc[leveloffset=+3] +include::modules/managed-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database-managed.adoc[leveloffset=+4] +include::modules/clair-disconnected.adoc[leveloffset=+3] +include::modules/clair-clairctl.adoc[leveloffset=+4] +include::modules/clair-openshift-config.adoc[leveloffset=+5] +include::modules/clair-export-bundle.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+5] +include::modules/clair-clairctl-standalone.adoc[leveloffset=+4] +include::modules/clair-standalone-config-location.adoc[leveloffset=+5] +include::modules/clair-export-bundle-standalone.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+5] +//include::modules/clair-crda-configuration.adoc[leveloffset=+3] +include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+3] + +//infrastructure +include::modules/operator-deploy-infrastructure.adoc[leveloffset=+1] + +//single namespace +//include::modules/monitoring-single-namespace.adoc[leveloffset=+2] + +//resize storage +include::modules/operator-resize-storage.adoc[leveloffset=+2] + +//customize images +include::modules/operator-customize-images.adoc[leveloffset=+2] + +//cloudfront +include::modules/operator-cloudfront.adoc[leveloffset=+2] + +// builders +include::modules/build-enhancements.adoc[leveloffset=+1] +//include::modules/build-enhanced-arch.adoc[leveloffset=+2] +//include::modules/build-limitations.adoc[leveloffset=+2] +//include::modules/builders-virtual-environment.adoc[leveloffset=+2] + +//geo-replication +include::modules/georepl-intro.adoc[leveloffset=+1] +[discrete] +== Additional resources +* For more information about the geo-replication feature's 
architecture, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_architecture/index#georepl-intro[the architecture guide], which includes technical diagrams and a high-level overview. + +include::modules/arch-georpl-features.adoc[leveloffset=+2] +include::modules/georepl-prereqs.adoc[leveloffset=+2] +//include::modules/georepl-arch-operator.adoc[leveloffset=+2] +include::modules/georepl-deploy-operator.adoc[leveloffset=+3] +include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/upgrading-geo-repl-quay-operator.adoc[leveloffset=+2] +include::modules/operator-georepl-site-removal.adoc[leveloffset=+3] + + +//backup and restore +include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] +include::modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc[leveloffset=+2] +include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay.adoc[leveloffset=+2] + +//helm OCI +//include::modules/operator-helm-oci.adoc[leveloffset=+1] + +//volume size overrides +include::modules/operator-volume-size-overrides.adoc[leveloffset=+1] + +//cso +include::modules/proc_container-security-operator-setup.adoc[leveloffset=+1] + +//oidc + +//awssts +include::modules/configuring-aws-sts-quay.adoc[leveloffset=+1] +include::modules/configuring-quay-ocp-aws-sts.adoc[leveloffset=+2] + +//qbo +include::modules/conc_quay-bridge-operator.adoc[leveloffset=+1] +include::modules/proc_setting-up-quay-for-qbo.adoc[leveloffset=+2] +include::modules/proc_installing-qbo-on-ocp.adoc[leveloffset=+2] +include::modules/proc_creating-ocp-secret-for-oauth-token.adoc[leveloffset=+2] +include::modules/proc_creating-quay-integration-cr.adoc[leveloffset=+2] +include::modules/quay-bridge-operator-test.adoc[leveloffset=+2] + +//ipv6 +include::modules/operator-ipv6-dual-stack.adoc[leveloffset=+1] + +//custom certs on kubernetes 
+include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+1] + + +//operator upgrade +include::modules/operator-upgrade.adoc[leveloffset=+1] + + + +[discrete] +== Additional resources +* For more details on the {productname} Operator, see the upstream +link:https://github.com/quay/quay-operator/[quay-operator] project. diff --git a/red_hat_quay_operator_features/modules b/red_hat_quay_operator_features/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/red_hat_quay_operator_features/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/red_hat_quay_overview/docinfo.xml b/red_hat_quay_overview/docinfo.xml new file mode 100644 index 000000000..aedbca614 --- /dev/null +++ b/red_hat_quay_overview/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +{productname} overview + + {productname} overview + + + Red Hat OpenShift Documentation Team + + diff --git a/red_hat_quay_overview/master.adoc b/red_hat_quay_overview/master.adoc new file mode 100644 index 000000000..6889fba55 --- /dev/null +++ b/red_hat_quay_overview/master.adoc @@ -0,0 +1,17 @@ +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] + +[id="quay-overview"] += {productname} overview + +{productname} is a security-focused and scalable private registry platform for managing content across globally distributed data center and cloud environments. It provides a single and resilient content repository for delivering containerized software to development and production across {ocp} and Kubernetes clusters. {productname} is a distributed and highly available container image registry for your enterprise. + +{productname} can be used for storing, building, and distributing container images and other OCI artifacts. It offers an intuitive web interface that allows users to quickly upload and manage their container images. Administrators can create private repositories, ensuring sensitive or proprietary code remains secure within their organization. 
Additionally, access controls and team collaboration can be managed, which enables seamless sharing of container images among designated team members. + +{productname} addresses container security concerns through its image scanner, link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/index[Clair]. When enabled, the service automatically scans container images for known vulnerabilities and security issues, providing developers with valuable insights into potential risks and suggesting remediation steps. + +{productname} excels in automation, and supports integration with popular Continuous Integration/Continuous Deployment (CI/CD) tools and platforms, enabling seamless automation of the container build and deployment processes. As a result, developers can streamline their workflows, significantly reducing manual intervention and improving overall development efficiency. + +{productname} caters to the needs of both large and small-scale deployments. Its high availability support ensures that organizations can rely on it for mission-critical applications. The platform can handle significant container image traffic and offers efficient replication and distribution mechanisms to deliver container images to various geographical locations. 
+ +include::modules/con_quay_intro.adoc[leveloffset=+1] \ No newline at end of file diff --git a/red_hat_quay_overview/modules b/red_hat_quay_overview/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/red_hat_quay_overview/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/release_notes/master.adoc b/release_notes/master.adoc index a03c903e4..33fa8dc6d 100644 --- a/release_notes/master.adoc +++ b/release_notes/master.adoc @@ -8,7 +8,7 @@ include::modules/attributes.adoc[] * Granular security management * Fast and robust at any scale * High velocity CI/CD -* Automated installation and upates +* Automated installation and updates * Enterprise authentication and team-based access control * {ocp} integration @@ -16,7 +16,7 @@ include::modules/attributes.adoc[] [IMPORTANT] ==== -{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, 3.7.2 -> 3.7.1. Rolling back to previous y-stream versions (3.7.0 -> 3.6.0) is not supported. This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. Database schema upgrades are not considered backwards compatible. +{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, {producty-n1}.2 -> {producty-n1}.1. Rolling back to previous y-stream versions ({producty} -> {producty-n1}) is not supported. This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. Database schema upgrades are not considered backwards compatible. Downgrading to previous z-streams is neither recommended nor supported by either Operator based deployments or virtual machine based deployments. Downgrading should only be done in extreme circumstances. 
The decision to rollback your {productname} deployment must be made in conjunction with the {productname} support and development teams. For more information, contact {productname} support. ==== @@ -33,6 +33,4 @@ Prior to version 2.9.2, {productname} was called Quay Enterprise. Documentation endif::downstream[] -include::modules/rn_3_80.adoc[leveloffset=+1] - -[discrete] +include::modules/rn_3_14_0.adoc[leveloffset=+1] \ No newline at end of file diff --git a/resources/notes.md b/resources/notes.md index 362b44360..0aabd1e6a 100644 --- a/resources/notes.md +++ b/resources/notes.md @@ -157,9 +157,9 @@ quay.quaylab.lan image_uri="http://porkchop.redhat.com/released/RHEL-7/7.5/Serve 1. Follow the [automation scripts]() to standalone install. - Run `ansible-playbook -i quay.inv_sample quaylab.yml -k` and fill in the prompts - You then need to install ceph - if you want to use it - - Note: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/#installing-a-red-hat-ceph-storage-cluster + - Note: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/#installing-a-red-hat-ceph-storage-cluster - You then need to create a USER: - - https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access + - https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access - You then need to create a bucket: (use the python script for this in the test directory). - `python s3bucket_create.py` << Be sure to edit variables in this using data from user create. 
diff --git a/resources/test/s3bucket_create.py b/resources/test/s3bucket_create.py index 238f1f8da..6420cd954 100644 --- a/resources/test/s3bucket_create.py +++ b/resources/test/s3bucket_create.py @@ -1,11 +1,11 @@ -# From: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access +# From: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access import boto import boto.s3.connection rdgw_hostname = "quay.quaylab.lan" rdgw_port = 8880 ## -# Fill this in after running: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/administration_cli#create_a_user +# Fill this in after running: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/administration_cli#create_a_user # $ radosgw-admin user create --uid=janedoe --display-name="Jane Doe" --email=jane@example.com ## access_key = $access diff --git a/securing_quay/docinfo.xml b/securing_quay/docinfo.xml new file mode 100644 index 000000000..858180e42 --- /dev/null +++ b/securing_quay/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Securing {productname} + + Securing {productname}: SSL/TLS, Certificates, and Encryption + + + Red Hat OpenShift Documentation Team + + diff --git a/securing_quay/master.adoc b/securing_quay/master.adoc new file mode 100644 index 000000000..a73b4fda0 --- /dev/null +++ b/securing_quay/master.adoc @@ -0,0 +1,45 @@ +include::modules/attributes.adoc[] + +:_content-type: ASSEMBLY +[id="securing-quay"] += Securing {productname} +:context: quay-security + +{productname} offers administrators the ability to secure communication and trusted access to their repositories 
through the use of Transport Layer Security (TLS), certificate management, and encryption techniques. Properly configuring SSL/TLS and implementing custom certificates can help safeguard data, secure external connections, and maintain trust between {productname} and the integrated services of your choosing. + +The following topics are covered: + +* Configuring custom SSL/TLS certificates for standalone {productname} deployments +* Configuring custom SSL/TLS certificates for {productname-ocp} +* Adding additional Certificate Authorities to the {productname} container +* Adding additional Certificate Authorities to {productname-ocp} + +//creating ssl-tls-certificates +include::modules/ssl-tls-quay-overview.adoc[leveloffset=+1] +include::modules/ssl-create-certs.adoc[leveloffset=+2] +//SSL/TLS Standalone +include::modules/configuring-ssl-tls.adoc[leveloffset=+2] +include::modules/ssl-config-cli.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-podman.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-system.adoc[leveloffset=+3] +//SSL/TLS Operator +include::modules/operator-custom-ssl-certs-config-bundle.adoc[leveloffset=+2] +include::modules/creating-custom-ssl-certs-config-bundle.adoc[leveloffset=+3] + +//PostgreSQL SSL/TLS certificates +include::modules/ssl-tls-sql.adoc[leveloffset=+1] +include::modules/configuring-cert-based-auth-quay-cloudsql.adoc[leveloffset=+2] + + +//additional ca certificates +include::modules/config-extra-ca-certs-quay.adoc[leveloffset=+1] +//Additional CA Certificates standalone +include::modules/config-custom-ssl-certs-manual.adoc[leveloffset=+2] +//Additional CA Certificates Operator +include::modules/config-additional-ca-certs-operator.adoc[leveloffset=+2] +include::modules/operator-config-cli-download.adoc[leveloffset=+3] +include::modules/adding-ca-certs-to-config.adoc[leveloffset=+3] +//Kubernetes +include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+2] + +//isolated builds diff --git a/securing_quay/modules 
b/securing_quay/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/securing_quay/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/troubleshooting_quay/docinfo.xml b/troubleshooting_quay/docinfo.xml new file mode 100644 index 000000000..09aac9ba9 --- /dev/null +++ b/troubleshooting_quay/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Troubleshooting {productname} + + Troubleshooting {productname} + + + Red Hat OpenShift Documentation Team + + diff --git a/troubleshooting_quay/master.adoc b/troubleshooting_quay/master.adoc new file mode 100644 index 000000000..4df6116fc --- /dev/null +++ b/troubleshooting_quay/master.adoc @@ -0,0 +1,115 @@ +include::modules/attributes.adoc[] +:_content-type: ASSEMBLY +[id="support-overview"] += Troubleshooting {productname} + +Red Hat offers administrators tools for gathering data for your {productname} deployment. You can use this data to troubleshoot your {productname} deployment yourself, or file a support ticket. 
+ +//Support +include::modules/getting-support.adoc[leveloffset=+1] + +//Debug mode +include::modules/running-quay-debug-mode-intro.adoc[leveloffset=+1] +include::modules/running-quay-debug-mode.adoc[leveloffset=+2] +include::modules/running-ldap-debug-mode.adoc[leveloffset=+2] +include::modules/running-operator-debug-mode.adoc[leveloffset=+2] + +//quay logs +include::modules/obtaining-quay-logs.adoc[leveloffset=+1] + +//quay config +include::modules/obtaining-quay-config-information.adoc[leveloffset=+1] + +//health-check +include::modules/health-check-quay.adoc[leveloffset=+1] + +//Troubleshooting components +include::modules/troubleshooting-components.adoc[leveloffset=+1] +// Database +include::modules/database-troubleshooting.adoc[leveloffset=+2] +include::modules/database-troubleshooting-issues.adoc[leveloffset=+3] +include::modules/troubleshooting-forgotten-passwords.adoc[leveloffset=+3] +include::modules/resetting-superuser-password-on-operator.adoc[leveloffset=+3] + +// Authentication +include::modules/authentication-troubleshooting.adoc[leveloffset=+2] +include::modules/authentication-troubleshooting-issues.adoc[leveloffset=+3] + +//Storage +include::modules/storage-troubleshooting.adoc[leveloffset=+2] +include::modules/storage-troubleshooting-issues.adoc[leveloffset=+3] +//include::modules/changing-storage-solution.adoc[leveloffset=+3] +//include::modules/connecting-s3-timeout.adoc[leveloffset=+3] + +//Geo replication +include::modules/georepl-intro.adoc[leveloffset=+2] +include::modules/geo-repl-troubleshooting-issues.adoc[leveloffset=+3] +//include::modules/storage-health-check-geo-repl.adoc[leveloffset=+3] +//include::modules/storage-buckets-not-synced.adoc[leveloffset=+3] +//include::modules/geo-repl-sslerror.adoc[leveloffset=+3] + +//Repository mirroring +include::modules/mirroring-intro.adoc[leveloffset=+2] +include::modules/repo-mirroring-troubleshooting-issues.adoc[leveloffset=+3] 
+//include::modules/mirroring-invalid-credentials.adoc[leveloffset=+3] +//include::modules/missing-runc-files.adoc[leveloffset=+3] +//include::modules/signature-does-not-exist.adoc[leveloffset=+3] + + +//Clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+2] +//include::modules/clair-concepts.adoc[leveloffset=+3] +include::modules/clair-troubleshooting-issues.adoc[leveloffset=+3] +//include::modules/unsupported-security-scan.adoc[leveloffset=+3] +//include::modules/scans-not-working-behind-proxy.adoc[leveloffset=+3] +//include::modules/connection-issues-clair-quay-db.adoc[leveloffset=+3] +//include::modules/java-image-scan-not-working.adoc[leveloffset=+3] + + +//// + + +[id="troubleshooting-quay"] += Troubleshooting {productname} + +Use the content in this guide to troubleshoot your {productname} registry on both standalone and Operator based deployments. + + +//General Troubleshooting +include::modules/troubleshooting-general.adoc[leveloffset=+1] +include::modules/troubleshooting-401-helm.adoc[leveloffset=+2] +include::modules/error-403-troubleshooting.adoc[leveloffset=+2] +include::modules/error-406-dockerfile.adoc[leveloffset=+2] +include::modules/error-429-troubleshooting.adoc[leveloffset=+2] +include::modules/error-500-troubleshooting.adoc[leveloffset=+2] +include::modules/error-502-troubleshooting.adoc[leveloffset=+2] +include::modules/build-trigger-error.adoc[leveloffset=+2] +include::modules/build-logs-not-loading.adoc[leveloffset=+2] +include::modules/cannot-access-private-repo.adoc[leveloffset=+2] +include::modules/cannot-locate-dockerfile.adoc[leveloffset=+2] +include::modules/cannot-reach-registry-endpoint.adoc[leveloffset=+2] +include::modules/docker-failing-pulls.adoc[leveloffset=+2] +include::modules/docker-io-timeout.adoc[leveloffset=+2] +include::modules/docker-login-error.adoc[leveloffset=+2] +include::modules/docker-timestamp-error.adoc[leveloffset=+2] +include::modules/marathon-mesos-fail.adoc[leveloffset=+2] 
+include::modules/mirrored-images-unable-pull-rhocp.adoc[leveloffset=+2] +include::modules/secrets-garbage-collected.adoc[leveloffset=+2] +include::modules/troubleshooting-slow-pushes.adoc[leveloffset=+2] + + + +//how tos +//include::modules/troubleshooting-how-tos.adoc[leveloffset=+2] +//include::modules/how-to-list-quay-repos.adoc[leveloffset=+3] +//include::modules/rotating-log-files.adoc[leveloffset=+3] + +//faqs +include::modules/frequently-asked-questions.adoc[leveloffset=+2] +include::modules/clair-distroless-container-images.adoc[leveloffset=+3] +include::modules/operator-geo-replication.adoc[leveloffset=+3] +include::modules/ldap-timeouts-quay.adoc[leveloffset=+3] +include::modules/limit-organization-creation.adoc[leveloffset=+3] +include::modules/resource-demand-failed-operator.adoc[leveloffset=+3] +include::modules/nested-ldap-team-sync.adoc[leveloffset=+3] +//// \ No newline at end of file diff --git a/troubleshooting_quay/modules b/troubleshooting_quay/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/troubleshooting_quay/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/upgrade_quay/master.adoc b/upgrade_quay/master.adoc index 13313e021..9891ffac7 100644 --- a/upgrade_quay/master.adoc +++ b/upgrade_quay/master.adoc @@ -1,22 +1,18 @@ include::modules/attributes.adoc[] - -[id='upgrade-quay-v3'] +[id="upgrade-quay-v3"] = Upgrade {productname} -== Upgrade overview - -The upgrade procedure for {productname} depends on the type of installation you are using. +The upgrade procedure for {productname} depends on the type of installation that you are using. -The {productname} Operator provides a simple method to deploy and manage a {productname} cluster. This is the preferred procedure for deploying {productname} on OpenShift. +The {productname} Operator provides a simple method to deploy and manage a {productname} cluster. This is the preferred procedure for deploying {productname} on {ocp}. 
-The {productname} Operator should be upgraded using the link:https://docs.openshift.com/container-platform/4.7/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)] as described in the section "Upgrading Quay using the Quay Operator". +The {productname} Operator should be upgraded using the link:https://docs.openshift.com/container-platform/{ocp-y}/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)] as described in the section "Upgrading Quay using the Quay Operator". -The procedure for upgrading a proof-of-concept or highly available installation of {productname} and Clair is documented in the section "Standalone upgrade". +The procedure for upgrading a proof of concept or highly available installation of {productname} and Clair is documented in the section "Standalone upgrade". include::modules/operator-upgrade.adoc[leveloffset=+1] - include::modules/proc_upgrade_standalone.adoc[leveloffset=+1] - +include::modules/upgrading-geo-repl-quay.adoc[leveloffset=+1] +include::modules/upgrading-geo-repl-quay-operator.adoc[leveloffset=+1] include::modules/qbo-operator-upgrade.adoc[leveloffset=+1] - include::modules/downgrade-quay-deployment.adoc[leveloffset=+1] diff --git a/use_quay/master.adoc b/use_quay/master.adoc index 2c8d36b6f..f155ae1f2 100644 --- a/use_quay/master.adoc +++ b/use_quay/master.adoc @@ -1,61 +1,142 @@ include::modules/attributes.adoc[] +:_content-type: ASSEMBLY [id='use-quay'] = Use {productname} +:context: use-quay -{productname} container image registries let you store container images in a -central location. As a regular user of a {productname} registry, you can -create repositories to organize your images and selectively add read (pull) -and write (push) access to the repositories you control. A user with -administrative privileges can perform a broader set of tasks, such as the -ability to add users and control default settings. 
+{productname} container image registries serve as centralized hubs for storing container images. Users of {productname} can create repositories to effectively manage images and grant specific read (pull) and write (push) permissions to the repositories as deemed necessary. Administrative privileges expand these capabilities, allowing users to perform a broader set of tasks, like the ability to add users and control default settings. -This guide assumes you have a {productname} deployed and are ready to start -setting it up and using it. +This guide offers an overview of {productname}'s users and organizations, its tenancy model, and basic operations like creating and deleting users, organizations, and repositories, handling access, and interacting with tags. It includes both UI and API operations. +[NOTE] +==== +The following API endpoints are linked to their associated entry in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API guide]. The {productname} API guide provides more information about each endpoint, such as response codes and optional query parameters. 
+==== +//intro and tenancy include::modules/user-org-intro.adoc[leveloffset=+1] include::modules/tenancy-model.adoc[leveloffset=+2] -include::modules/user-create.adoc[leveloffset=+2] +//Red Hat Quay API +include::modules/enabling-using-the-api.adoc[leveloffset=+1] + +//creating and deleting users +include::modules/user-create.adoc[leveloffset=+1] +include::modules/creating-user-account-quay-ui.adoc[leveloffset=+2] +include::modules/creating-user-account-quay-api.adoc[leveloffset=+2] +include::modules/deleting-user-ui.adoc[leveloffset=+2] +include::modules/deleting-user-cli-api.adoc[leveloffset=+2] +//organizations overview +include::modules/organizations-overview.adoc[leveloffset=+1] include::modules/org-create.adoc[leveloffset=+2] - - +include::modules/org-create-api.adoc[leveloffset=+2] +include::modules/organization-settings-v2-ui.adoc[leveloffset=+2] +//organization settings API? +include::modules/org-delete.adoc[leveloffset=+2] +include::modules/org-delete-api.adoc[leveloffset=+2] +//repositories overview include::modules/proc_use-quay-create-repo.adoc[leveloffset=+1] - +include::modules/creating-an-image-repository-via-the-ui.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-docker.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-skopeo-copy.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-the-api.adoc[leveloffset=+2] +include::modules/deleting-an-image-repository-via-ui.adoc[leveloffset=+2] +include::modules/deleting-an-image-repository-via-the-api.adoc[leveloffset=+2] + +//robot accounts +include::modules/robot-account-overview.adoc[leveloffset=+1] +include::modules/creating-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/creating-robot-account-api.adoc[leveloffset=+2] +include::modules/managing-robot-account-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/disabling-robot-account.adoc[leveloffset=+2] 
+include::modules/regenerating-robot-account-token-api.adoc[leveloffset=+2] +include::modules/deleting-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/deleting-robot-account-api.adoc[leveloffset=+2] +// federation +include::modules/keyless-authentication-robot-accounts.adoc[leveloffset=+2] + +//access management repositories include::modules/proc_use-quay-manage-repo.adoc[leveloffset=+1] - -include::modules/proc_use-quay-tags.adoc[leveloffset=+1] - +include::modules/teams-overview.adoc[leveloffset=+2] +include::modules/creating-a-team-ui.adoc[leveloffset=+3] +include::modules/creating-a-team-api.adoc[leveloffset=+3] + +include::modules/managing-team-ui.adoc[leveloffset=+3] +include::modules/add-users-to-team.adoc[leveloffset=+4] +include::modules/set-team-role.adoc[leveloffset=+4] +include::modules/managing-team-members-repo-permissions-ui.adoc[leveloffset=+4] +include::modules/viewing-additional-info-about-team-ui.adoc[leveloffset=+4] + +include::modules/managing-a-team-api.adoc[leveloffset=+3] +include::modules/managing-team-members-api.adoc[leveloffset=+4] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+4] +include::modules/deleting-team-within-organization-api.adoc[leveloffset=+4] + +include::modules/default-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/default-permissions-api.adoc[leveloffset=+2] +include::modules/allow-access-user-repo.adoc[leveloffset=+2] +include::modules/adjust-access-user-repo-api.adoc[leveloffset=+2] + +//image tags overview +include::modules/image-tags-overview.adoc[leveloffset=+1] +include::modules/viewing-and-modifying-tags.adoc[leveloffset=+2] +include::modules/viewing-model-card-information.adoc[leveloffset=+3] +include::modules/viewing-tags-api.adoc[leveloffset=+2] +include::modules/adding-a-new-tag-to-image.adoc[leveloffset=+2] +include::modules/adding-a-new-tag-to-image-api.adoc[leveloffset=+2] +include::modules/adding-managing-labels.adoc[leveloffset=+2] 
+include::modules/adding-managing-labels-api.adoc[leveloffset=+2] +include::modules/setting-tag-expirations-v2-ui.adoc[leveloffset=+2] +include::modules/setting-tag-expiration-api.adoc[leveloffset=+3] +include::modules/fetching-images-and-tags.adoc[leveloffset=+2] + +include::modules/viewing-tag-history-v2-ui.adoc[leveloffset=+2] +include::modules/viewing-tag-history-v2-api.adoc[leveloffset=+2] +include::modules/deleting-a-tag.adoc[leveloffset=+2] +include::modules/deleting-a-tag-api.adoc[leveloffset=+2] +include::modules/reverting-tag-changes.adoc[leveloffset=+2] +include::modules/reverting-tag-changes-api.adoc[leveloffset=+2] + +//logs include::modules/proc_use-quay-view-export-logs.adoc[leveloffset=+1] +include::modules/viewing-usage-logs-v2-ui.adoc[leveloffset=+2] +include::modules/viewing-usage-logs-api.adoc[leveloffset=+2] +include::modules/use-quay-export-logs.adoc[leveloffset=+2] +include::modules/use-quay-export-logs-api.adoc[leveloffset=+2] +//security scans +include::modules/security-scanning.adoc[leveloffset=+1] +include::modules/security-scanning-ui.adoc[leveloffset=+2] +include::modules/security-scanning-api.adoc[leveloffset=+2] +//Notifications and events +include::modules/proc_use-quay-notifications.adoc[leveloffset=+1] +include::modules/notification-actions.adoc[leveloffset=+2] +include::modules/creating-notifications.adoc[leveloffset=+2] +include::modules/creating-image-expiration-notification.adoc[leveloffset=+3] +include::modules/creating-notifications-api.adoc[leveloffset=+2] +include::modules/repository-events.adoc[leveloffset=+2] -include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] - -include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+1] - -include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+1] +//docker files +//include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] +//include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+1] 
-include::modules/proc_use-quay-skip-trigger.adoc[leveloffset=+1] +//custom trigger +//include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+2] -include::modules/proc_github-build-triggers.adoc[leveloffset=+1] +//include::modules/proc_use-quay-skip-trigger.adoc[leveloffset=+1] -include::modules/proc_github-app.adoc[leveloffset=+1] +//include::modules/proc_github-build-triggers.adoc[leveloffset=+1] -include::modules/proc_use-quay-notifications.adoc[leveloffset=+1] +//github oauth? +//include::modules/proc_github-app.adoc[leveloffset=+1] -include::modules/oci-intro.adoc[leveloffset=+1] -include::modules/helm-oci-prereqs.adoc[leveloffset=+2] -include::modules/helm-oci-quay.adoc[leveloffset=+2] -include::modules/config-fields-helm-oci.adoc[leveloffset=+2] -include::modules/cosign-oci-intro.adoc[leveloffset=+2] -include::modules/cosign-oci-with-quay.adoc[leveloffset=+2] -include::modules/other-oci-artifacts-with-quay.adoc[leveloffset=+2] -include::modules/disable-oci-artifacts-in-quay.adoc[leveloffset=+2] // Quota Management include::modules/quota-management-and-enforcement.adoc[leveloffset=+1] include::modules/quota-management-arch.adoc[leveloffset=+2] include::modules/quota-management-limitations.adoc[leveloffset=+2] -include::modules/config-fields-quota.adoc[leveloffset=+2] +include::modules/config-fields-quota-management.adoc[leveloffset=+2] + + include::modules/quota-establishment-api.adoc[leveloffset=+2] //Proxy getProxyCache @@ -66,9 +147,18 @@ include::modules/proxy-cache-procedure.adoc[leveloffset=+2] include::modules/proxy-cache-leveraging-storage-quota-limits.adoc[leveloffset=+2] // Virtual builders -include::modules/build-enhancements.adoc[leveloffset=+1] -include::modules/build-enhanced-arch.adoc[leveloffset=+2] -include::modules/build-limitations.adoc[leveloffset=+2] -include::modules/builders-virtual-environment.adoc[leveloffset=+2] +//include::modules/build-enhancements.adoc[leveloffset=+1] 
+//include::modules/build-enhanced-arch.adoc[leveloffset=+2] +//include::modules/build-limitations.adoc[leveloffset=+2] +//include::modules/builders-virtual-environment.adoc[leveloffset=+2] + +//oci +include::modules/oci-intro.adoc[leveloffset=+1] +include::modules/helm-oci-prereqs.adoc[leveloffset=+2] +include::modules/helm-oci-quay.adoc[leveloffset=+2] +include::modules/oras-annotation-parsing.adoc[leveloffset=+2] +include::modules/testing-oci-support.adoc[leveloffset=+2] -include::modules/proc_use-api.adoc[leveloffset=+1] +//cosign +//include::modules/cosign-oci-intro.adoc[leveloffset=+2] +//include::modules/cosign-oci-with-quay.adoc[leveloffset=+2] diff --git a/welcome.adoc b/welcome.adoc index f31ddffe8..7c7e436ed 100644 --- a/welcome.adoc +++ b/welcome.adoc @@ -16,7 +16,7 @@ If you want to develop Quay, please see https://github.com/quay/quay/blob/master Quay can be deployed in a variety of configurations, both within and outside of Kubernetes. For automated deployments, the Quay Operator is recommended. The documentation below provides instructions on how to set up Quay via the operator or manually. -xref:deploy_quay_on_openshift_op_tng.adoc[Deploy with Openshift Operator] +xref:deploy_red_hat_quay_operator.adoc[Deploy with OpenShift Operator] xref:deploy_quay.adoc[Deploy Proof of Concept] @@ -32,9 +32,13 @@ xref:upgrade_quay.adoc[Upgrade {productname}] == Using Quay +xref:red_hat_quay_operator_features.adoc[Red Hat Quay Operator features] + xref:use_quay.adoc[Use {productname}] xref:api_quay.adoc[{productname} API Guide] +xref:quay_io.adoc[{quayio}] + NOTE: Help make {productname} docs better on https://github.com/quay/quay-docs[github]